diff --git a/CHANGELOG.md b/CHANGELOG.md index 8443279fb..a6ccb03c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). ### Added - #792 - added `CROSS_CONTAINER_IN_CONTAINER` environment variable to replace `CROSS_DOCKER_IN_DOCKER`. +- #785 - added support for remote container engines through data volumes. also adds in utility to commands to create and remove persistent data volumes. - #782 - added `build-std` config option, which builds the rust standard library from source if enabled. - #775 - forward Cargo exit code to host - #772 - added `CROSS_CONTAINER_OPTS` environment variable to replace `DOCKER_OPTS`. diff --git a/Cargo.lock b/Cargo.lock index f15812f40..4446c8278 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -160,8 +160,10 @@ dependencies = [ "serde", "serde_ignored", "serde_json", + "sha1_smol", "shell-escape", "shell-words", + "tempfile", "thiserror", "toml", "walkdir", @@ -191,6 +193,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "gimli" version = "0.26.1" @@ -243,6 +254,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "itoa" version = "1.0.1" @@ -368,6 +388,15 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "redox_syscall" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +dependencies = [ + "bitflags", +] + [[package]] name = "regex" version = "1.5.5" @@ -385,6 +414,15 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -473,6 +511,12 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + [[package]] name = "sharded-slab" version = "0.1.4" @@ -511,6 +555,20 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + [[package]] name = "termcolor" version = "1.1.3" diff --git a/Cargo.toml b/Cargo.toml index fe254fb55..0f33837dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,9 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" serde_ignored = "0.1.2" shell-words = "1.1.0" +walkdir = { version = "2", optional = true } +sha1_smol = "1.0.0" +tempfile = "3.3.0" [target.'cfg(not(windows))'.dependencies] nix = { version = "0.24", 
default-features = false, features = ["user"] } diff --git a/src/bin/commands/containers.rs b/src/bin/commands/containers.rs new file mode 100644 index 000000000..f6541809f --- /dev/null +++ b/src/bin/commands/containers.rs @@ -0,0 +1,370 @@ +use std::path::Path; + +use atty::Stream; +use clap::Args; +use cross::{CommandExt, VersionMetaExt}; + +#[derive(Args, Debug)] +pub struct ListVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct RemoveVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of volumes. + #[clap(short, long)] + pub force: bool, + /// Remove volumes. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct PruneVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct CreateCrateVolume { + /// Triple for the target platform. + #[clap(long)] + pub target: String, + /// If cross is running inside a container. + #[clap(short, long)] + pub docker_in_docker: bool, + /// If we should copy the cargo registry to the volume. + #[clap(short, long)] + pub copy_registry: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct RemoveCrateVolume { + /// Triple for the target platform. + #[clap(long)] + pub target: String, + /// If cross is running inside a container. + #[clap(short, long)] + pub docker_in_docker: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct ListContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct RemoveContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of containers. + #[clap(short, long)] + pub force: bool, + /// Remove containers. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +fn get_cross_volumes(engine: &cross::Engine, verbose: bool) -> cross::Result> { + let stdout = cross::docker_subcommand(engine, "volume") + .arg("list") + .arg("--format") + .arg("{{.Name}}") + .arg("--filter") + // handles simple regex: ^ for start of line. + .arg("name=^cross-") + .run_and_get_stdout(verbose)?; + + let mut volumes: Vec = stdout.lines().map(|s| s.to_string()).collect(); + volumes.sort(); + + Ok(volumes) +} + +pub fn list_volumes( + ListVolumes { verbose, .. }: ListVolumes, + engine: &cross::Engine, +) -> cross::Result<()> { + get_cross_volumes(engine, verbose)? + .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +pub fn remove_volumes( + RemoveVolumes { + verbose, + force, + execute, + .. 
+ }: RemoveVolumes, + engine: &cross::Engine, +) -> cross::Result<()> { + let volumes = get_cross_volumes(engine, verbose)?; + + let mut command = cross::docker_subcommand(engine, "volume"); + command.arg("rm"); + if force { + command.arg("--force"); + } + command.args(&volumes); + if execute { + command.run(verbose).map_err(Into::into) + } else { + println!("{:?}", command); + Ok(()) + } +} + +pub fn prune_volumes( + PruneVolumes { verbose, .. }: PruneVolumes, + engine: &cross::Engine, +) -> cross::Result<()> { + cross::docker_subcommand(engine, "volume") + .args(&["prune", "--force"]) + .run_and_get_status(verbose)?; + + Ok(()) +} + +fn get_package_info( + engine: &cross::Engine, + target: &str, + channel: Option<&str>, + docker_in_docker: bool, + verbose: bool, +) -> cross::Result<(cross::Target, cross::CargoMetadata, cross::Directories)> { + let target_list = cross::target_list(false)?; + let target = cross::Target::from(target, &target_list); + let metadata = cross::cargo_metadata_with_args(None, None, verbose)? + .ok_or(eyre::eyre!("unable to get project metadata"))?; + let cwd = std::env::current_dir()?; + let host_meta = cross::version_meta()?; + let host = host_meta.host(); + let sysroot = cross::get_sysroot(&host, &target, channel, verbose)?.1; + let dirs = + cross::Directories::create(engine, &metadata, &cwd, &sysroot, docker_in_docker, verbose)?; + + Ok((target, metadata, dirs)) +} + +pub fn create_crate_volume( + CreateCrateVolume { + target, + docker_in_docker, + copy_registry, + verbose, + .. + }: CreateCrateVolume, + engine: &cross::Engine, + channel: Option<&str>, +) -> cross::Result<()> { + let (target, metadata, dirs) = + get_package_info(engine, &target, channel, docker_in_docker, verbose)?; + let container = cross::container_identifier(&target, &metadata, &dirs)?; + let volume = format!("{container}-keep"); + + if cross::volume_exists(engine, &volume, verbose)? 
{ + eyre::bail!("error: volume {volume} already exists."); + } + + cross::docker_subcommand(engine, "volume") + .args(&["create", &volume]) + .run_and_get_status(verbose)?; + + // stop the container if it's already running + let state = cross::container_state(engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("warning: container {container} was running."); + cross::container_stop(engine, &container, verbose)?; + } + if state.exists() { + eprintln!("warning: container {container} was exited."); + cross::container_rm(engine, &container, verbose)?; + } + + // create a dummy running container to copy data over + let mount_prefix = Path::new("/cross"); + let mut docker = cross::docker_subcommand(engine, "run"); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{}", volume, mount_prefix.display())]); + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + docker.arg("ubuntu:16.04"); + // ensure the process never exits until we stop it + docker.args(&["sh", "-c", "sleep infinity"]); + docker.run_and_get_status(verbose)?; + + cross::copy_volume_container_xargo( + engine, + &container, + &dirs.xargo, + &target, + mount_prefix, + verbose, + )?; + cross::copy_volume_container_cargo( + engine, + &container, + &dirs.cargo, + mount_prefix, + copy_registry, + verbose, + )?; + cross::copy_volume_container_rust( + engine, + &container, + &dirs.sysroot, + &target, + mount_prefix, + verbose, + )?; + + cross::container_stop(engine, &container, verbose)?; + cross::container_rm(engine, &container, verbose)?; + + Ok(()) +} + +pub fn remove_crate_volume( + RemoveCrateVolume { + target, + docker_in_docker, + verbose, + .. + }: RemoveCrateVolume, + engine: &cross::Engine, + channel: Option<&str>, +) -> cross::Result<()> { + let (target, metadata, dirs) = + get_package_info(engine, &target, channel, docker_in_docker, verbose)?; + let container = cross::container_identifier(&target, &metadata, &dirs)?; + let volume = format!("{container}-keep"); + + if !cross::volume_exists(engine, &volume, verbose)? { + eyre::bail!("error: volume {volume} does not exist."); + } + + cross::volume_rm(engine, &volume, verbose)?; + + Ok(()) +} + +fn get_cross_containers(engine: &cross::Engine, verbose: bool) -> cross::Result> { + let stdout = cross::docker_subcommand(engine, "ps") + .arg("-a") + .arg("--format") + .arg("{{.Names}}: {{.State}}") + .arg("--filter") + // handles simple regex: ^ for start of line. + .arg("name=^cross-") + .run_and_get_stdout(verbose)?; + + let mut containers: Vec = stdout.lines().map(|s| s.to_string()).collect(); + containers.sort(); + + Ok(containers) +} + +pub fn list_containers( + ListContainers { verbose, .. }: ListContainers, + engine: &cross::Engine, +) -> cross::Result<()> { + get_cross_containers(engine, verbose)? + .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +pub fn remove_containers( + RemoveContainers { + verbose, + force, + execute, + .. 
+ }: RemoveContainers, + engine: &cross::Engine, +) -> cross::Result<()> { + let containers = get_cross_containers(engine, verbose)?; + let mut running = vec![]; + let mut stopped = vec![]; + for container in containers.iter() { + // cannot fail, formatted as {{.Names}}: {{.State}} + let (name, state) = container.split_once(':').unwrap(); + let name = name.trim(); + let state = cross::ContainerState::new(state.trim())?; + if state.is_stopped() { + stopped.push(name); + } else { + running.push(name); + } + } + + let mut commands = vec![]; + if !running.is_empty() { + let mut stop = cross::docker_subcommand(engine, "stop"); + stop.args(&running); + commands.push(stop); + } + + if !(stopped.is_empty() && running.is_empty()) { + let mut rm = cross::docker_subcommand(engine, "rm"); + if force { + rm.arg("--force"); + } + rm.args(&running); + rm.args(&stopped); + commands.push(rm); + } + if execute { + for mut command in commands { + command.run(verbose)?; + } + } else { + for command in commands { + println!("{:?}", command); + } + } + + Ok(()) +} diff --git a/src/bin/commands/images.rs b/src/bin/commands/images.rs new file mode 100644 index 000000000..a0a3c3b09 --- /dev/null +++ b/src/bin/commands/images.rs @@ -0,0 +1,213 @@ +use clap::Args; +use cross::CommandExt; + +// known image prefixes, with their registry +// the docker.io registry can also be implicit +const GHCR_IO: &str = cross::CROSS_IMAGE; +const RUST_EMBEDDED: &str = "rustembedded/cross:"; +const DOCKER_IO: &str = "docker.io/rustembedded/cross:"; +const IMAGE_PREFIXES: &[&str] = &[GHCR_IO, DOCKER_IO, RUST_EMBEDDED]; + +#[derive(Args, Debug)] +pub struct ListImages { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +#[derive(Args, Debug)] +pub struct RemoveImages { + /// If not provided, remove all images. + pub targets: Vec, + /// Remove images matching provided targets. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of images. + #[clap(short, long)] + pub force: bool, + /// Remove local (development) images. + #[clap(short, long)] + pub local: bool, + /// Remove images. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). 
+ #[clap(long)] + pub engine: Option, +} + +#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] +struct Image { + repository: String, + tag: String, + // need to remove images by ID, not just tag + id: String, +} + +impl Image { + fn name(&self) -> String { + format!("{}:{}", self.repository, self.tag) + } +} + +fn parse_image(image: &str) -> Image { + // this cannot panic: we've formatted our image list as `${repo}:${tag} ${id}` + let (repository, rest) = image.split_once(':').unwrap(); + let (tag, id) = rest.split_once(' ').unwrap(); + Image { + repository: repository.to_string(), + tag: tag.to_string(), + id: id.to_string(), + } +} + +fn is_cross_image(repository: &str) -> bool { + IMAGE_PREFIXES.iter().any(|i| repository.starts_with(i)) +} + +fn is_local_image(tag: &str) -> bool { + tag.starts_with("local") +} + +fn get_cross_images( + engine: &cross::Engine, + verbose: bool, + local: bool, +) -> cross::Result> { + let stdout = cross::docker_subcommand(engine, "images") + .arg("--format") + .arg("{{.Repository}}:{{.Tag}} {{.ID}}") + .run_and_get_stdout(verbose)?; + + let mut images: Vec = stdout + .lines() + .map(parse_image) + .filter(|image| is_cross_image(&image.repository)) + .filter(|image| local || !is_local_image(&image.tag)) + .collect(); + images.sort(); + + Ok(images) +} + +// the old rustembedded targets had the following format: +// repository = (${registry}/)?rustembedded/cross +// tag = ${target}(-${version})? +// the last component must match `[A-Za-z0-9_-]` and +// we must have at least 3 components. the first component +// may contain other characters, such as `thumbv8m.main-none-eabi`. +fn rustembedded_target(tag: &str) -> String { + let is_target_char = |c: char| c == '_' || c.is_ascii_alphanumeric(); + let mut components = vec![]; + for (index, component) in tag.split('-').enumerate() { + if index <= 2 || (!component.is_empty() && component.chars().all(is_target_char)) { + components.push(component) + } else { + break; + } + } + + components.join("-") +} + +fn get_image_target(image: &Image) -> cross::Result { + if let Some(stripped) = image.repository.strip_prefix(GHCR_IO) { + Ok(stripped.to_string()) + } else if let Some(tag) = image.tag.strip_prefix(RUST_EMBEDDED) { + Ok(rustembedded_target(tag)) + } else if let Some(tag) = image.tag.strip_prefix(DOCKER_IO) { + Ok(rustembedded_target(tag)) + } else { + eyre::bail!("cannot get target for image {}", image.name()) + } +} + +pub fn list_images( + ListImages { verbose, .. }: ListImages, + engine: &cross::Engine, +) -> cross::Result<()> { + get_cross_images(engine, verbose, true)? + .iter() + .for_each(|line| println!("{}", line.name())); + + Ok(()) +} + +fn remove_images( + engine: &cross::Engine, + images: &[&str], + verbose: bool, + force: bool, + execute: bool, +) -> cross::Result<()> { + let mut command = cross::docker_subcommand(engine, "rmi"); + if force { + command.arg("--force"); + } + command.args(images); + if execute { + command.run(verbose).map_err(Into::into) + } else { + println!("{:?}", command); + Ok(()) + } +} + +pub fn remove_all_images( + RemoveImages { + verbose, + force, + local, + execute, + .. + }: RemoveImages, + engine: &cross::Engine, +) -> cross::Result<()> { + let images = get_cross_images(engine, verbose, local)?; + let ids: Vec<&str> = images.iter().map(|i| i.id.as_ref()).collect(); + remove_images(engine, &ids, verbose, force, execute) +} + +pub fn remove_target_images( + RemoveImages { + targets, + verbose, + force, + local, + execute, + .. 
+ }: RemoveImages, + engine: &cross::Engine, +) -> cross::Result<()> { + let images = get_cross_images(engine, verbose, local)?; + let mut ids = vec![]; + for image in images.iter() { + let target = get_image_target(image)?; + if targets.contains(&target) { + ids.push(image.id.as_ref()); + } + } + remove_images(engine, &ids, verbose, force, execute) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_rustembedded_target() { + let targets = [ + "x86_64-unknown-linux-gnu", + "x86_64-apple-darwin", + "thumbv8m.main-none-eabi", + ]; + for target in targets { + let versioned = format!("{target}-0.2.1"); + assert_eq!(rustembedded_target(target), target.to_string()); + assert_eq!(rustembedded_target(&versioned), target.to_string()); + } + } +} diff --git a/src/bin/commands/mod.rs b/src/bin/commands/mod.rs new file mode 100644 index 000000000..aa80a62f4 --- /dev/null +++ b/src/bin/commands/mod.rs @@ -0,0 +1,5 @@ +mod containers; +mod images; + +pub use self::containers::*; +pub use self::images::*; diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index 8cbb4a11e..b95a0e749 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -1,21 +1,15 @@ #![deny(missing_debug_implementations, rust_2018_idioms)] -use std::path::{Path, PathBuf}; -use std::process::Command; - use clap::{Parser, Subcommand}; -use cross::CommandExt; -// known image prefixes, with their registry -// the docker.io registry can also be implicit -const GHCR_IO: &str = "ghcr.io/cross-rs/"; -const RUST_EMBEDDED: &str = "rustembedded/cross:"; -const DOCKER_IO: &str = "docker.io/rustembedded/cross:"; -const IMAGE_PREFIXES: &[&str] = &[GHCR_IO, DOCKER_IO, RUST_EMBEDDED]; +mod commands; #[derive(Parser, Debug)] #[clap(version, about, long_about = None)] struct Cli { + /// Toolchain name/version to use (such as stable or 1.59.0). + #[clap(value_parser = is_toolchain)] + toolchain: Option, #[clap(subcommand)] command: Commands, } @@ -23,230 +17,87 @@ struct Cli { #[derive(Subcommand, Debug)] enum Commands { /// List cross images in local storage. - ListImages { - /// Provide verbose diagnostic output. - #[clap(short, long)] - verbose: bool, - /// Container engine (such as docker or podman). - #[clap(long)] - engine: Option, - }, + ListImages(commands::ListImages), /// Remove cross images in local storage. - RemoveImages { - /// If not provided, remove all images. - targets: Vec, - /// Remove images matching provided targets. - #[clap(short, long)] - verbose: bool, - /// Force removal of images. - #[clap(short, long)] - force: bool, - /// Remove local (development) images. - #[clap(short, long)] - local: bool, - /// Remove images. Default is a dry run. - #[clap(short, long)] - execute: bool, - /// Container engine (such as docker or podman). 
- #[clap(long)] - engine: Option, - }, -} - -#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] -struct Image { - repository: String, - tag: String, - // need to remove images by ID, not just tag - id: String, -} - -impl Image { - fn name(&self) -> String { - format!("{}:{}", self.repository, self.tag) - } -} - -fn get_container_engine(engine: Option<&str>) -> Result { - if let Some(ce) = engine { - which::which(ce) - } else { - cross::get_container_engine() - } -} - -fn parse_image(image: &str) -> Image { - // this cannot panic: we've formatted our image list as `${repo}:${tag} ${id}` - let (repository, rest) = image.split_once(':').unwrap(); - let (tag, id) = rest.split_once(' ').unwrap(); - Image { - repository: repository.to_string(), - tag: tag.to_string(), - id: id.to_string(), - } -} - -fn is_cross_image(repository: &str) -> bool { - IMAGE_PREFIXES.iter().any(|i| repository.starts_with(i)) -} - -fn is_local_image(tag: &str) -> bool { - tag.starts_with("local") -} - -fn get_cross_images(engine: &Path, verbose: bool, local: bool) -> cross::Result> { - let stdout = Command::new(engine) - .arg("images") - .arg("--format") - .arg("{{.Repository}}:{{.Tag}} {{.ID}}") - .run_and_get_stdout(verbose)?; - - let mut images: Vec = stdout - .lines() - .map(parse_image) - .filter(|image| is_cross_image(&image.repository)) - .filter(|image| local || !is_local_image(&image.tag)) - .collect(); - images.sort(); - - Ok(images) -} - -// the old rustembedded targets had the following format: -// repository = (${registry}/)?rustembedded/cross -// tag = ${target}(-${version})? -// the last component must match `[A-Za-z0-9_-]` and -// we must have at least 3 components. the first component -// may contain other characters, such as `thumbv8m.main-none-eabi`. -fn rustembedded_target(tag: &str) -> String { - let is_target_char = |c: char| c == '_' || c.is_ascii_alphanumeric(); - let mut components = vec![]; - for (index, component) in tag.split('-').enumerate() { - if index <= 2 || (!component.is_empty() && component.chars().all(is_target_char)) { - components.push(component) - } else { - break; - } - } - - components.join("-") -} - -fn get_image_target(image: &Image) -> cross::Result { - if let Some(stripped) = image.repository.strip_prefix(GHCR_IO) { - Ok(stripped.to_string()) - } else if let Some(tag) = image.tag.strip_prefix(RUST_EMBEDDED) { - Ok(rustembedded_target(tag)) - } else if let Some(tag) = image.tag.strip_prefix(DOCKER_IO) { - Ok(rustembedded_target(tag)) + RemoveImages(commands::RemoveImages), + /// List cross data volumes in local storage. + ListVolumes(commands::ListVolumes), + /// Remove cross data volumes in local storage. + RemoveVolumes(commands::RemoveVolumes), + /// Prune volumes not used by any container. + PruneVolumes(commands::PruneVolumes), + /// Create a persistent data volume for the current crate. + CreateCrateVolume(commands::CreateCrateVolume), + /// Remove a persistent data volume for the current crate. + RemoveCrateVolume(commands::RemoveCrateVolume), + /// List cross containers in local storage. + ListContainers(commands::ListContainers), + /// Stop and remove cross containers in local storage. 
+ RemoveContainers(commands::RemoveContainers), +} + +fn is_toolchain(toolchain: &str) -> cross::Result { + if toolchain.starts_with('+') { + Ok(toolchain.chars().skip(1).collect()) } else { - eyre::bail!("cannot get target for image {}", image.name()) + eyre::bail!("not a toolchain") } } -fn list_images(engine: &Path, verbose: bool) -> cross::Result<()> { - get_cross_images(engine, verbose, true)? - .iter() - .for_each(|line| println!("{}", line.name())); - - Ok(()) -} - -fn remove_images( - engine: &Path, - images: &[&str], - verbose: bool, - force: bool, - execute: bool, -) -> cross::Result<()> { - let mut command = Command::new(engine); - command.arg("rmi"); - if force { - command.arg("--force"); - } - command.args(images); - if execute { - command.run(verbose).map_err(Into::into) +fn get_container_engine(engine: Option<&str>, verbose: bool) -> cross::Result { + let engine = if let Some(ce) = engine { + which::which(ce)? } else { - println!("{:?}", command); - Ok(()) - } -} - -fn remove_all_images( - engine: &Path, - verbose: bool, - force: bool, - local: bool, - execute: bool, -) -> cross::Result<()> { - let images = get_cross_images(engine, verbose, local)?; - let ids: Vec<&str> = images.iter().map(|i| i.id.as_ref()).collect(); - remove_images(engine, &ids, verbose, force, execute) -} - -fn remove_target_images( - engine: &Path, - targets: &[String], - verbose: bool, - force: bool, - local: bool, - execute: bool, -) -> cross::Result<()> { - let images = get_cross_images(engine, verbose, local)?; - let mut ids = vec![]; - for image in images.iter() { - let target = get_image_target(image)?; - if targets.contains(&target) { - ids.push(image.id.as_ref()); - } - } - remove_images(engine, &ids, verbose, force, execute) + cross::get_container_engine()? 
+ }; + cross::Engine::from_path(engine, true, verbose) } pub fn main() -> cross::Result<()> { cross::install_panic_hook()?; let cli = Cli::parse(); - match &cli.command { - Commands::ListImages { verbose, engine } => { - let engine = get_container_engine(engine.as_deref())?; - list_images(&engine, *verbose)?; + match cli.command { + Commands::ListImages(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::list_images(args, &engine)?; } - Commands::RemoveImages { - targets, - verbose, - force, - local, - execute, - engine, - } => { - let engine = get_container_engine(engine.as_deref())?; - if targets.is_empty() { - remove_all_images(&engine, *verbose, *force, *local, *execute)?; + Commands::RemoveImages(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + if args.targets.is_empty() { + commands::remove_all_images(args, &engine)?; } else { - remove_target_images(&engine, targets, *verbose, *force, *local, *execute)?; + commands::remove_target_images(args, &engine)?; } } + Commands::ListVolumes(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::list_volumes(args, &engine)?; + } + Commands::RemoveVolumes(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::remove_volumes(args, &engine)?; + } + Commands::PruneVolumes(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::prune_volumes(args, &engine)?; + } + Commands::CreateCrateVolume(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::create_crate_volume(args, &engine, cli.toolchain.as_deref())?; + } + Commands::RemoveCrateVolume(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::remove_crate_volume(args, &engine, cli.toolchain.as_deref())?; + } + Commands::ListContainers(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::list_containers(args, &engine)?; + } + Commands::RemoveContainers(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + commands::remove_containers(args, &engine)?; + } } Ok(()) } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_rustembedded_target() { - let targets = [ - "x86_64-unknown-linux-gnu", - "x86_64-apple-darwin", - "thumbv8m.main-none-eabi", - ]; - for target in targets { - let versioned = format!("{target}-0.2.1"); - assert_eq!(rustembedded_target(target), target.to_string()); - assert_eq!(rustembedded_target(&versioned), target.to_string()); - } - } -} diff --git a/src/cli.rs b/src/cli.rs index ad8d6fa6a..27b8542b1 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -16,6 +16,7 @@ pub struct Args { pub target_dir: Option, pub docker_in_docker: bool, pub enable_doctests: bool, + pub is_remote: bool, pub manifest_path: Option, } @@ -150,6 +151,9 @@ pub fn parse(target_list: &TargetList) -> Result { let enable_doctests = env::var("CROSS_UNSTABLE_ENABLE_DOCTESTS") .map(|s| bool_from_envvar(&s)) .unwrap_or_default(); + let is_remote = env::var("CROSS_REMOTE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default(); Ok(Args { all, @@ -160,6 +164,7 @@ pub fn parse(target_list: &TargetList) -> Result { target_dir, docker_in_docker, enable_doctests, + is_remote, manifest_path, }) } diff --git a/src/docker/engine.rs b/src/docker/engine.rs new file mode 100644 index 000000000..300dbde7c --- /dev/null +++ 
b/src/docker/engine.rs @@ -0,0 +1,73 @@ +use std::env; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use crate::errors::*; +use crate::extensions::CommandExt; + +const DOCKER: &str = "docker"; +const PODMAN: &str = "podman"; + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum EngineType { + Docker, + Podman, + PodmanRemote, + Other, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Engine { + pub kind: EngineType, + pub path: PathBuf, + pub is_remote: bool, +} + +impl Engine { + pub fn new(is_remote: bool, verbose: bool) -> Result { + let path = get_container_engine() + .map_err(|_| eyre::eyre!("no container engine found")) + .with_suggestion(|| "is docker or podman installed?")?; + Self::from_path(path, is_remote, verbose) + } + + pub fn from_path(path: PathBuf, is_remote: bool, verbose: bool) -> Result { + let kind = get_engine_type(&path, verbose)?; + Ok(Engine { + path, + kind, + is_remote, + }) + } + + pub fn needs_remote(&self) -> bool { + self.is_remote && self.kind == EngineType::Podman + } +} + +// determine if the container engine is docker. this fixes issues with +// any aliases (#530), and doesn't fail if an executable suffix exists. +fn get_engine_type(ce: &Path, verbose: bool) -> Result { + let stdout = Command::new(ce) + .arg("--help") + .run_and_get_stdout(verbose)? + .to_lowercase(); + + if stdout.contains("podman-remote") { + Ok(EngineType::PodmanRemote) + } else if stdout.contains("podman") { + Ok(EngineType::Podman) + } else if stdout.contains("docker") && !stdout.contains("emulate") { + Ok(EngineType::Docker) + } else { + Ok(EngineType::Other) + } +} + +pub fn get_container_engine() -> Result { + if let Ok(ce) = env::var("CROSS_CONTAINER_ENGINE") { + which::which(ce) + } else { + which::which(DOCKER).or_else(|_| which::which(PODMAN)) + } +} diff --git a/src/docker/local.rs b/src/docker/local.rs new file mode 100644 index 000000000..28ee1d967 --- /dev/null +++ b/src/docker/local.rs @@ -0,0 +1,93 @@ +use std::path::Path; +use std::process::ExitStatus; + +use super::engine::*; +use super::shared::*; +use crate::cargo::CargoMetadata; +use crate::errors::Result; +use crate::extensions::CommandExt; +use crate::{Config, Target}; +use atty::Stream; + +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub(crate) fn local_run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + cwd: &Path, +) -> Result { + let engine = Engine::new(false, verbose)?; + let dirs = Directories::create(&engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; + + let mut cmd = cargo_cmd(uses_xargo); + cmd.args(args); + + let mut docker = docker_subcommand(&engine, "run"); + docker.args(&["--userns", "host"]); + docker_envvars(&mut docker, config, target)?; + + let mount_volumes = docker_mount( + &mut docker, + metadata, + config, + target, + cwd, + verbose, + |docker, val, verbose| mount(docker, val, "", verbose), + |_| {}, + )?; + + docker.arg("--rm"); + + docker_seccomp(&mut docker, engine.kind, target, verbose)?; + docker_user_id(&mut docker, engine.kind); + + docker + .args(&["-v", &format!("{}:/xargo:Z", dirs.xargo.display())]) + .args(&["-v", &format!("{}:/cargo:Z", dirs.cargo.display())]) + // Prevent `bin` from being mounted inside the Docker container. 
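+        // passing `-v` with only a container path creates an anonymous volume at
+        // `/cargo/bin`, which shadows the bind-mounted `$CARGO_HOME/bin` above so
+        // host-built executables in `~/.cargo/bin` are not visible inside the container.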
+ .args(&["-v", "/cargo/bin"]); + if mount_volumes { + docker.args(&[ + "-v", + &format!( + "{}:{}:Z", + dirs.host_root.display(), + dirs.mount_root.display() + ), + ]); + } else { + docker.args(&["-v", &format!("{}:/project:Z", dirs.host_root.display())]); + } + docker + .args(&["-v", &format!("{}:/rust:Z,ro", dirs.sysroot.display())]) + .args(&["-v", &format!("{}:/target:Z", dirs.target.display())]); + docker_cwd(&mut docker, metadata, &dirs, cwd, mount_volumes)?; + + // When running inside NixOS or using Nix packaging we need to add the Nix + // Store to the running container so it can load the needed binaries. + if let Some(ref nix_store) = dirs.nix_store { + docker.args(&[ + "-v", + &format!("{}:{}:Z", nix_store.display(), nix_store.display()), + ]); + } + + if atty::is(Stream::Stdin) { + docker.arg("-i"); + if atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + } + + docker + .arg(&image(config, target)?) + .args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]) + .run_and_get_status(verbose) + .map_err(Into::into) +} diff --git a/src/docker/mod.rs b/src/docker/mod.rs new file mode 100644 index 000000000..f99d2365d --- /dev/null +++ b/src/docker/mod.rs @@ -0,0 +1,57 @@ +mod engine; +mod local; +mod remote; +mod shared; + +use self::local::*; + +pub use self::engine::*; +pub use self::remote::*; +pub use self::shared::*; + +use std::path::Path; +use std::process::ExitStatus; + +use crate::cargo::CargoMetadata; +use crate::errors::*; +use crate::{Config, Target}; + +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub fn container_run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + is_remote: bool, + cwd: &Path, +) -> Result { + if is_remote { + remote_run( + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } else { + local_run( + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } +} diff --git a/src/docker/remote.rs b/src/docker/remote.rs new file mode 100644 index 000000000..60e69698c --- /dev/null +++ b/src/docker/remote.rs @@ -0,0 +1,648 @@ +use std::io::Read; +use std::path::{Path, PathBuf}; +use std::process::{Command, ExitStatus}; +use std::{env, fs}; + +use super::engine::Engine; +use super::shared::*; +use crate::cargo::CargoMetadata; +use crate::config::{bool_from_envvar, Config}; +use crate::errors::Result; +use crate::extensions::CommandExt; +use crate::file; +use crate::rustc; +use crate::Target; +use atty::Stream; + +struct DeleteVolume<'a>(&'a Engine, &'a VolumeId, bool); + +impl<'a> Drop for DeleteVolume<'a> { + fn drop(&mut self) { + if let VolumeId::Discard(id) = self.1 { + volume_rm(self.0, id, self.2).ok(); + } + } +} + +struct DeleteContainer<'a>(&'a Engine, &'a str, bool); + +impl<'a> Drop for DeleteContainer<'a> { + fn drop(&mut self) { + container_stop(self.0, self.1, self.2).ok(); + container_rm(self.0, self.1, self.2).ok(); + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum ContainerState { + Created, + Running, + Paused, + Restarting, + Dead, + Exited, + DoesNotExist, +} + +impl ContainerState { + pub fn new(state: &str) -> Result { + match state { + "created" => Ok(ContainerState::Created), + "running" => Ok(ContainerState::Running), + "paused" => Ok(ContainerState::Paused), + "restarting" => Ok(ContainerState::Restarting), + "dead" => Ok(ContainerState::Dead), + "exited" => 
Ok(ContainerState::Exited), + "" => Ok(ContainerState::DoesNotExist), + _ => eyre::bail!("unknown container state: got {state}"), + } + } + + pub fn is_stopped(&self) -> bool { + matches!(self, Self::Exited | Self::DoesNotExist) + } + + pub fn exists(&self) -> bool { + !matches!(self, Self::DoesNotExist) + } +} + +#[derive(Debug)] +enum VolumeId { + Keep(String), + Discard(String), +} + +impl VolumeId { + fn create(engine: &Engine, container: &str, verbose: bool) -> Result { + let keep_id = format!("{container}-keep"); + if volume_exists(engine, &keep_id, verbose)? { + Ok(Self::Keep(keep_id)) + } else { + Ok(Self::Discard(container.to_string())) + } + } +} + +impl AsRef for VolumeId { + fn as_ref(&self) -> &str { + match self { + Self::Keep(s) => s, + Self::Discard(s) => s, + } + } +} + +fn create_volume_dir( + engine: &Engine, + container: &str, + dir: &Path, + verbose: bool, +) -> Result { + // make our parent directory if needed + docker_subcommand(engine, "exec") + .arg(container) + .args(&["sh", "-c", &format!("mkdir -p '{}'", dir.display())]) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +// copy files for a docker volume, for remote host support +fn copy_volume_files( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + verbose: bool, +) -> Result { + docker_subcommand(engine, "cp") + .arg("-a") + .arg(&src.display().to_string()) + .arg(format!("{container}:{}", dst.display())) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +fn is_cachedir_tag(path: &Path) -> Result { + let mut buffer = [b'0'; 43]; + let mut file = fs::OpenOptions::new().read(true).open(path)?; + file.read_exact(&mut buffer)?; + + Ok(&buffer == b"Signature: 8a477f597d28d172789f06886806bc55") +} + +fn is_cachedir(entry: &fs::DirEntry) -> bool { + // avoid any cached directories when copying + // see https://bford.info/cachedir/ + if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + let path = entry.path().join("CACHEDIR.TAG"); + path.exists() && is_cachedir_tag(&path).unwrap_or(false) + } else { + false + } +} + +// copy files for a docker volume, for remote host support +fn copy_volume_files_nocache( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + verbose: bool, +) -> Result { + // avoid any cached directories when copying + // see https://bford.info/cachedir/ + let tempdir = tempfile::tempdir()?; + let temppath = tempdir.path(); + copy_dir(src, temppath, 0, |e, _| !is_cachedir(e))?; + copy_volume_files(engine, container, temppath, dst, verbose) +} + +pub fn copy_volume_container_xargo( + engine: &Engine, + container: &str, + xargo_dir: &Path, + target: &Target, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // only need to copy the rustlib files for our current target. 
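+    // xargo keeps per-target sysroots under `$XARGO_HOME/lib/rustlib/<triple>`,
+    // so copying that single subtree into the volume is sufficient.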
+ let triple = target.triple(); + let relpath = Path::new("lib").join("rustlib").join(&triple); + let src = xargo_dir.join(&relpath); + let dst = mount_prefix.join("xargo").join(&relpath); + if Path::new(&src).exists() { + create_volume_dir(engine, container, dst.parent().unwrap(), verbose)?; + copy_volume_files(engine, container, &src, &dst, verbose)?; + } + + Ok(()) +} + +pub fn copy_volume_container_cargo( + engine: &Engine, + container: &str, + cargo_dir: &Path, + mount_prefix: &Path, + copy_registry: bool, + verbose: bool, +) -> Result<()> { + let dst = mount_prefix.join("cargo"); + let copy_registry = env::var("CROSS_REMOTE_COPY_REGISTRY") + .map(|s| bool_from_envvar(&s)) + .unwrap_or(copy_registry); + + if copy_registry { + copy_volume_files(engine, container, cargo_dir, &dst, verbose)?; + } else { + // can copy a limit subset of files: the rest is present. + create_volume_dir(engine, container, &dst, verbose)?; + for entry in fs::read_dir(cargo_dir)? { + let file = entry?; + let basename = file.file_name().to_string_lossy().into_owned(); + if !basename.starts_with('.') && !matches!(basename.as_ref(), "git" | "registry") { + copy_volume_files(engine, container, &file.path(), &dst, verbose)?; + } + } + } + + Ok(()) +} + +// recursively copy a directory into another +fn copy_dir(src: &Path, dst: &Path, depth: u32, skip: Skip) -> Result<()> +where + Skip: Copy + Fn(&fs::DirEntry, u32) -> bool, +{ + for entry in fs::read_dir(src)? { + let file = entry?; + if skip(&file, depth) { + continue; + } + + let src_path = file.path(); + let dst_path = dst.join(file.file_name()); + if file.file_type()?.is_file() { + fs::copy(&src_path, &dst_path)?; + } else { + fs::create_dir(&dst_path).ok(); + copy_dir(&src_path, &dst_path, depth + 1, skip)?; + } + } + + Ok(()) +} + +pub fn copy_volume_container_rust( + engine: &Engine, + container: &str, + sysroot: &Path, + target: &Target, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // the rust toolchain is quite large, but most of it isn't needed + // we need the bin, libexec, and etc directories, and part of the lib directory. + let dst = mount_prefix.join("rust"); + create_volume_dir(engine, container, &dst, verbose)?; + for basename in ["bin", "libexec", "etc"] { + let file = sysroot.join(basename); + copy_volume_files(engine, container, &file, &dst, verbose)?; + } + + // the lib directories are rather large, so we want only a subset. + // now, we use a temp directory for everything else in the libdir + // we can pretty safely assume we don't have symlinks here. + let rustlib = Path::new("lib").join("rustlib"); + let src_rustlib = sysroot.join(&rustlib); + let dst_rustlib = dst.join(&rustlib); + + let tempdir = tempfile::tempdir()?; + let temppath = tempdir.path(); + copy_dir(&sysroot.join("lib"), temppath, 0, |e, d| { + d == 0 && e.file_name() == "rustlib" + })?; + fs::create_dir(&temppath.join("rustlib")).ok(); + copy_dir( + &src_rustlib, + &temppath.join("rustlib"), + 0, + |entry, depth| { + if depth != 0 { + return false; + } + let file_type = match entry.file_type() { + Ok(file_type) => file_type, + Err(_) => return true, + }; + let file_name = entry.file_name(); + !(file_type.is_file() || file_name == "src" || file_name == "etc") + }, + )?; + copy_volume_files(engine, container, temppath, &dst.join("lib"), verbose)?; + // must make the `dst.join("lib")` **after** here, or we copy temp into lib. + create_volume_dir(engine, container, &dst_rustlib, verbose)?; + + // we first copy over the toolchain file, then everything besides it. 
+ // since we don't want to call docker 100x, we copy the intermediate + // files to a temp directory so they're cleaned up afterwards. + let toolchain_path = src_rustlib.join(&target.triple()); + if toolchain_path.exists() { + copy_volume_files(engine, container, &toolchain_path, &dst_rustlib, verbose)?; + } + + // now we need to copy over the host toolchain too, since it has + // some requirements to find std libraries, etc. + let rustc = sysroot.join("bin").join("rustc"); + let libdir = Command::new(rustc) + .args(&["--print", "target-libdir"]) + .run_and_get_stdout(verbose)?; + let host_toolchain_path = Path::new(libdir.trim()).parent().unwrap(); + copy_volume_files( + engine, + container, + host_toolchain_path, + &dst_rustlib, + verbose, + )?; + + Ok(()) +} + +pub fn volume_create(engine: &Engine, volume: &str, verbose: bool) -> Result { + docker_subcommand(engine, "volume") + .args(&["create", volume]) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +pub fn volume_rm(engine: &Engine, volume: &str, verbose: bool) -> Result { + docker_subcommand(engine, "volume") + .args(&["rm", volume]) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +pub fn volume_exists(engine: &Engine, volume: &str, verbose: bool) -> Result { + docker_subcommand(engine, "volume") + .args(&["inspect", volume]) + .run_and_get_output(verbose) + .map(|output| output.status.success()) + .map_err(Into::into) +} + +pub fn container_stop(engine: &Engine, container: &str, verbose: bool) -> Result { + docker_subcommand(engine, "stop") + .arg(container) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +pub fn container_rm(engine: &Engine, container: &str, verbose: bool) -> Result { + docker_subcommand(engine, "rm") + .arg(container) + .run_and_get_status(verbose) + .map_err(Into::into) +} + +pub fn container_state(engine: &Engine, container: &str, verbose: bool) -> Result { + let stdout = docker_subcommand(engine, "ps") + .arg("-a") + .args(&["--filter", &format!("name={container}")]) + .args(&["--format", "{{.State}}"]) + .run_and_get_stdout(verbose)?; + ContainerState::new(stdout.trim()) +} + +fn path_hash(path: &Path) -> String { + sha1_smol::Sha1::from(path.display().to_string().as_bytes()) + .digest() + .to_string() + .get(..5) + .expect("sha1 is expected to be at least 5 characters long") + .to_string() +} + +pub fn container_identifier( + target: &Target, + metadata: &CargoMetadata, + dirs: &Directories, +) -> Result { + let host_version_meta = rustc::version_meta()?; + let commit_hash = host_version_meta + .commit_hash + .unwrap_or(host_version_meta.short_version_string); + + let workspace_root = &metadata.workspace_root; + let package = metadata + .packages + .iter() + .find(|p| p.manifest_path.parent().unwrap() == workspace_root) + .unwrap_or_else(|| metadata.packages.get(0).unwrap()); + + let name = &package.name; + let triple = target.triple(); + let project_hash = path_hash(&package.manifest_path); + let toolchain_hash = path_hash(&dirs.sysroot); + Ok(format!( + "cross-{name}-{triple}-{project_hash}-{toolchain_hash}-{commit_hash}" + )) +} + +fn remote_mount_path(val: &Path, verbose: bool) -> Result { + let host_path = file::canonicalize(val)?; + canonicalize_mount_path(&host_path, verbose) +} + +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub(crate) fn remote_run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + cwd: &Path, +) -> Result { + let engine = 
Engine::new(true, verbose)?; + let dirs = Directories::create(&engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; + + let mut cmd = cargo_cmd(uses_xargo); + cmd.args(args); + + let mount_prefix = "/cross"; + + // the logic is broken into the following steps + // 1. get our unique identifiers and cleanup from a previous run. + // 2. create a data volume to store everything + // 3. start our container with the data volume and all envvars + // 4. copy all mounted volumes over + // 5. create symlinks for all mounted data + // 6. execute our cargo command inside the container + // 7. copy data from target dir back to host + // 8. stop container and delete data volume + // + // we use structs that wrap the resources to ensure they're dropped + // in the correct order even on error, to ensure safe cleanup + + // 1. get our unique identifiers and cleanup from a previous run. + // this can happen if we didn't gracefully exit before + let container = container_identifier(target, metadata, &dirs)?; + let volume = VolumeId::create(&engine, &container, verbose)?; + let state = container_state(&engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("warning: container {container} was running."); + container_stop(&engine, &container, verbose)?; + } + if state.exists() { + eprintln!("warning: container {container} was exited."); + container_rm(&engine, &container, verbose)?; + } + if let VolumeId::Discard(ref id) = volume { + if volume_exists(&engine, id, verbose)? { + eprintln!("warning: temporary volume {container} existed."); + volume_rm(&engine, id, verbose)?; + } + } + + // 2. create our volume to copy all our data over to + if let VolumeId::Discard(ref id) = volume { + volume_create(&engine, id, verbose)?; + } + let _volume_deletter = DeleteVolume(&engine, &volume, verbose); + + // 3. create our start container command here + let mut docker = docker_subcommand(&engine, "run"); + docker.args(&["--userns", "host"]); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{mount_prefix}", volume.as_ref())]); + docker_envvars(&mut docker, config, target)?; + + let mut volumes = vec![]; + let mount_volumes = docker_mount( + &mut docker, + metadata, + config, + target, + cwd, + verbose, + |_, val, verbose| remote_mount_path(val, verbose), + |(src, dst)| volumes.push((src, dst)), + )?; + + docker_seccomp(&mut docker, engine.kind, target, verbose)?; + + // Prevent `bin` from being mounted inside the Docker container. + docker.args(&["-v", &format!("{mount_prefix}/cargo/bin")]); + + // When running inside NixOS or using Nix packaging we need to add the Nix + // Store to the running container so it can load the needed binaries. + if let Some(ref nix_store) = dirs.nix_store { + volumes.push((nix_store.display().to_string(), nix_store.to_path_buf())) + } + + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + + docker + .arg(&image(config, target)?) + // ensure the process never exits until we stop it + .args(&["sh", "-c", "sleep infinity"]) + .run_and_get_status(verbose)?; + let _container_deletter = DeleteContainer(&engine, &container, verbose); + + // 4. 
copy all mounted volumes over + let copy_cache = env::var("CROSS_REMOTE_COPY_CACHE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default(); + let copy = |src, dst: &PathBuf| { + if copy_cache { + copy_volume_files(&engine, &container, src, dst, verbose) + } else { + copy_volume_files_nocache(&engine, &container, src, dst, verbose) + } + }; + let mount_prefix_path = mount_prefix.as_ref(); + if let VolumeId::Discard(_) = volume { + copy_volume_container_xargo( + &engine, + &container, + &dirs.xargo, + target, + mount_prefix_path, + verbose, + )?; + copy_volume_container_cargo( + &engine, + &container, + &dirs.cargo, + mount_prefix_path, + false, + verbose, + )?; + copy_volume_container_rust( + &engine, + &container, + &dirs.sysroot, + target, + mount_prefix_path, + verbose, + )?; + } + let mount_root = if mount_volumes { + // cannot panic: absolute unix path, must have root + let rel_mount_root = dirs.mount_root.strip_prefix("/").unwrap(); + let mount_root = mount_prefix_path.join(rel_mount_root); + if rel_mount_root != PathBuf::new() { + create_volume_dir(&engine, &container, mount_root.parent().unwrap(), verbose)?; + } + mount_root + } else { + mount_prefix_path.join("project") + }; + copy(&dirs.host_root, &mount_root)?; + + let mut copied = vec![ + (&dirs.xargo, mount_prefix_path.join("xargo")), + (&dirs.cargo, mount_prefix_path.join("cargo")), + (&dirs.sysroot, mount_prefix_path.join("rust")), + (&dirs.host_root, mount_root.clone()), + ]; + let mut to_symlink = vec![]; + let target_dir = file::canonicalize(&dirs.target)?; + let target_dir = if let Ok(relpath) = target_dir.strip_prefix(&dirs.host_root) { + // target dir is in the project, just symlink it in + let target_dir = mount_root.join(relpath); + to_symlink.push((target_dir.clone(), "/target".to_string())); + target_dir + } else { + // outside project, need to copy the target data over + // only do if we're copying over cached files. + let target_dir = mount_prefix_path.join("target"); + if copy_cache { + copy(&dirs.target, &target_dir)?; + } else { + create_volume_dir(&engine, &container, &target_dir, verbose)?; + } + + copied.push((&dirs.target, target_dir.clone())); + target_dir + }; + for (src, dst) in volumes.iter() { + let src: &Path = src.as_ref(); + if let Some((psrc, pdst)) = copied.iter().find(|(p, _)| src.starts_with(p)) { + // path has already been copied over + let relpath = src.strip_prefix(psrc).unwrap(); + to_symlink.push((pdst.join(relpath), dst.display().to_string())); + } else { + let rel_dst = dst.strip_prefix("/").unwrap(); + let mount_dst = mount_prefix_path.join(rel_dst); + if rel_dst != PathBuf::new() { + create_volume_dir(&engine, &container, mount_dst.parent().unwrap(), verbose)?; + } + copy(src, &mount_dst)?; + } + } + + // 5. create symlinks for copied data + let mut symlink = vec!["set -e pipefail".to_string()]; + if verbose { + symlink.push("set -x".to_string()); + } + symlink.push(format!( + "chown -R {uid}:{gid} {mount_prefix}/*", + uid = user_id(), + gid = group_id(), + )); + // need a simple script to add symlinks, but not override existing files. 
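+    // the script walks every entry under the mount prefix and links it to the
+    // same path outside the prefix: directories that already exist are recursed
+    // into, missing paths get a symlink, and an existing regular file is an error.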
+ symlink.push(format!( + "prefix=\"{mount_prefix}\" + +symlink_recurse() {{ + for f in \"${{1}}\"/*; do + dst=${{f#\"$prefix\"}} + if [ -f \"${{dst}}\" ]; then + echo \"invalid: got unexpected file at ${{dst}}\" 1>&2 + exit 1 + elif [ -d \"${{dst}}\" ]; then + symlink_recurse \"${{f}}\" + else + ln -s \"${{f}}\" \"${{dst}}\" + fi + done +}} + +symlink_recurse \"${{prefix}}\" +" + )); + for (src, dst) in to_symlink { + symlink.push(format!("ln -s \"{}\" \"{}\"", src.display(), dst)); + } + docker_subcommand(&engine, "exec") + .arg(&container) + .args(&["sh", "-c", &symlink.join("\n")]) + .run_and_get_status(verbose) + .map_err::(Into::into)?; + + // 6. execute our cargo command inside the container + let mut docker = docker_subcommand(&engine, "exec"); + docker_user_id(&mut docker, engine.kind); + docker_cwd(&mut docker, metadata, &dirs, cwd, mount_volumes)?; + docker.arg(&container); + docker.args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]); + let status = docker.run_and_get_status(verbose).map_err(Into::into); + + // 7. copy data from our target dir back to host + docker_subcommand(&engine, "cp") + .arg("-a") + .arg(&format!("{container}:{}", target_dir.display())) + .arg(&dirs.target.parent().unwrap()) + .run_and_get_status(verbose) + .map_err::(Into::into)?; + + status +} diff --git a/src/seccomp.json b/src/docker/seccomp.json similarity index 100% rename from src/seccomp.json rename to src/docker/seccomp.json diff --git a/src/docker.rs b/src/docker/shared.rs similarity index 67% rename from src/docker.rs rename to src/docker/shared.rs index 9408e01ee..8980edd65 100644 --- a/src/docker.rs +++ b/src/docker/shared.rs @@ -1,68 +1,132 @@ use std::io::Write; use std::path::{Path, PathBuf}; -use std::process::{Command, ExitStatus}; +use std::process::Command; use std::{env, fs}; +use super::engine::*; use crate::cargo::CargoMetadata; +use crate::config::Config; +use crate::errors::*; use crate::extensions::{CommandExt, SafeCommand}; -use crate::file::write_file; +use crate::file::{self, write_file}; use crate::id; -use crate::{errors::*, file}; -use crate::{Config, Target}; -use atty::Stream; -use eyre::bail; +use crate::Target; pub const CROSS_IMAGE: &str = "ghcr.io/cross-rs"; const DOCKER_IMAGES: &[&str] = &include!(concat!(env!("OUT_DIR"), "/docker-images.rs")); -const DOCKER: &str = "docker"; -const PODMAN: &str = "podman"; + // secured profile based off the docker documentation for denied syscalls: // https://docs.docker.com/engine/security/seccomp/#significant-syscalls-blocked-by-the-default-profile // note that we've allow listed `clone` and `clone3`, which is necessary // to fork the process, and which podman allows by default. -const SECCOMP: &str = include_str!("seccomp.json"); - -#[derive(Debug, PartialEq, Eq)] -enum EngineType { - Docker, - Podman, - Other, +pub(crate) const SECCOMP: &str = include_str!("seccomp.json"); + +#[derive(Debug)] +pub struct Directories { + pub cargo: PathBuf, + pub xargo: PathBuf, + pub target: PathBuf, + pub nix_store: Option, + pub host_root: PathBuf, + pub mount_root: PathBuf, + pub mount_cwd: PathBuf, + pub sysroot: PathBuf, } -// determine if the container engine is docker. this fixes issues with -// any aliases (#530), and doesn't fail if an executable suffix exists. -fn get_engine_type(ce: &Path, verbose: bool) -> Result { - let stdout = Command::new(ce) - .arg("--help") - .run_and_get_stdout(verbose)? 
- .to_lowercase(); - - if stdout.contains("podman") { - Ok(EngineType::Podman) - } else if stdout.contains("docker") && !stdout.contains("emulate") { - Ok(EngineType::Docker) - } else { - Ok(EngineType::Other) +impl Directories { + #[allow(unused_variables)] + pub fn create( + engine: &Engine, + metadata: &CargoMetadata, + cwd: &Path, + sysroot: &Path, + docker_in_docker: bool, + verbose: bool, + ) -> Result { + let mount_finder = if docker_in_docker { + MountFinder::new(docker_read_mount_paths(engine)?) + } else { + MountFinder::default() + }; + let home_dir = + home::home_dir().ok_or_else(|| eyre::eyre!("could not find home directory"))?; + let cargo = home::cargo_home()?; + let xargo = env::var_os("XARGO_HOME") + .map(PathBuf::from) + .unwrap_or_else(|| home_dir.join(".xargo")); + let nix_store = env::var_os("NIX_STORE").map(PathBuf::from); + let target = &metadata.target_directory; + + // create the directories we are going to mount before we mount them, + // otherwise `docker` will create them but they will be owned by `root` + fs::create_dir(&cargo).ok(); + fs::create_dir(&xargo).ok(); + fs::create_dir(&target).ok(); + + let cargo = mount_finder.find_mount_path(cargo); + let xargo = mount_finder.find_mount_path(xargo); + let target = mount_finder.find_mount_path(target); + + // root is either workspace_root, or, if we're outside the workspace root, the current directory + let host_root = mount_finder.find_mount_path(if metadata.workspace_root.starts_with(cwd) { + cwd + } else { + &metadata.workspace_root + }); + + // root is either workspace_root, or, if we're outside the workspace root, the current directory + let mount_root: PathBuf; + #[cfg(target_os = "windows")] + { + // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. + mount_root = wslpath(&host_root, verbose)?; + } + #[cfg(not(target_os = "windows"))] + { + mount_root = mount_finder.find_mount_path(host_root.clone()); + } + let mount_cwd: PathBuf; + #[cfg(target_os = "windows")] + { + // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. + mount_cwd = wslpath(cwd, verbose)?; + } + #[cfg(not(target_os = "windows"))] + { + mount_cwd = mount_finder.find_mount_path(cwd); + } + let sysroot = mount_finder.find_mount_path(sysroot); + + Ok(Directories { + cargo, + xargo, + target, + nix_store, + host_root, + mount_root, + mount_cwd, + sysroot, + }) } } -pub fn get_container_engine() -> Result { - if let Ok(ce) = env::var("CROSS_CONTAINER_ENGINE") { - which::which(ce) - } else { - which::which(DOCKER).or_else(|_| which::which(PODMAN)) +pub fn docker_command(engine: &Engine) -> Command { + let mut command = Command::new(&engine.path); + if engine.needs_remote() { + // if we're using podman and not podman-remote, need `--remote`. 
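+        // `podman --remote` makes a full podman client talk to a podman service
+        // rather than run containers locally; the `podman-remote` client is
+        // remote-only, so `needs_remote()` matches only `EngineType::Podman`.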
+ command.arg("--remote"); } + command } -pub fn docker_command(engine: &Path, subcommand: &str) -> Result { - let mut command = Command::new(engine); +pub fn docker_subcommand(engine: &Engine, subcommand: &str) -> Command { + let mut command = docker_command(engine); command.arg(subcommand); - command.args(&["--userns", "host"]); - Ok(command) + command } /// Register binfmt interpreters -pub fn register(target: &Target, verbose: bool) -> Result<()> { +pub(crate) fn container_register(target: &Target, is_remote: bool, verbose: bool) -> Result<()> { let cmd = if target.is_windows() { // https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html "mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc && \ @@ -72,8 +136,9 @@ pub fn register(target: &Target, verbose: bool) -> Result<()> { binfmt-support qemu-user-static" }; - let engine = get_container_engine()?; - docker_command(&engine, "run")? + let engine = Engine::new(is_remote, verbose)?; + docker_subcommand(&engine, "run") + .args(&["--userns", "host"]) .arg("--privileged") .arg("--rm") .arg("ubuntu:16.04") @@ -89,7 +154,9 @@ fn validate_env_var(var: &str) -> Result<(&str, Option<&str>)> { }; if key == "CROSS_RUNNER" { - bail!("CROSS_RUNNER environment variable name is reserved and cannot be pass through"); + eyre::bail!( + "CROSS_RUNNER environment variable name is reserved and cannot be pass through" + ); } Ok((key, value)) @@ -99,115 +166,110 @@ fn parse_docker_opts(value: &str) -> Result> { shell_words::split(value).wrap_err_with(|| format!("could not parse docker opts of {}", value)) } -#[allow(unused_variables)] -pub fn mount(cmd: &mut Command, val: &Path, verbose: bool) -> Result { - let host_path = file::canonicalize(&val) - .wrap_err_with(|| format!("when canonicalizing path `{}`", val.display()))?; - let mount_path: PathBuf; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. - mount_path = wslpath(&host_path, verbose)?; - } - #[cfg(not(target_os = "windows"))] - { - mount_path = host_path.clone(); +pub(crate) fn cargo_cmd(uses_xargo: bool) -> SafeCommand { + if uses_xargo { + SafeCommand::new("xargo") + } else { + SafeCommand::new("cargo") } - cmd.args(&[ +} + +pub(crate) fn mount( + docker: &mut Command, + val: &Path, + prefix: &str, + verbose: bool, +) -> Result { + let host_path = file::canonicalize(val)?; + let mount_path = canonicalize_mount_path(&host_path, verbose)?; + docker.args(&[ "-v", - &format!("{}:{}", host_path.display(), mount_path.display()), + &format!("{}:{prefix}{}", host_path.display(), mount_path.display()), ]); Ok(mount_path) } -#[allow(clippy::too_many_arguments)] // TODO: refactor -pub fn run( - target: &Target, - args: &[String], - metadata: &CargoMetadata, - config: &Config, - uses_xargo: bool, - sysroot: &Path, - verbose: bool, - docker_in_docker: bool, - cwd: &Path, -) -> Result { - let engine = get_container_engine() - .map_err(|_| eyre::eyre!("no container engine found")) - .with_suggestion(|| "is docker or podman installed?")?; - let engine_type = get_engine_type(&engine, verbose)?; - - let mount_finder = if docker_in_docker { - MountFinder::new(docker_read_mount_paths(&engine)?) - } else { - MountFinder::default() - }; +pub(crate) fn docker_envvars(docker: &mut Command, config: &Config, target: &Target) -> Result<()> { + for ref var in config.env_passthrough(target)? 
{ + validate_env_var(var)?; - let home_dir = home::home_dir().ok_or_else(|| eyre::eyre!("could not find home directory"))?; - let cargo_dir = home::cargo_home()?; - let xargo_dir = env::var_os("XARGO_HOME") - .map(PathBuf::from) - .unwrap_or_else(|| home_dir.join(".xargo")); - let nix_store_dir = env::var_os("NIX_STORE").map(PathBuf::from); - let target_dir = &metadata.target_directory; - - // create the directories we are going to mount before we mount them, - // otherwise `docker` will create them but they will be owned by `root` - fs::create_dir(&target_dir).ok(); - fs::create_dir(&cargo_dir).ok(); - fs::create_dir(&xargo_dir).ok(); - - // update paths to the host mounts path. - let cargo_dir = mount_finder.find_mount_path(cargo_dir); - let xargo_dir = mount_finder.find_mount_path(xargo_dir); - let target_dir = mount_finder.find_mount_path(target_dir); - // root is either workspace_root, or, if we're outside the workspace root, the current directory - let host_root = mount_finder.find_mount_path(if metadata.workspace_root.starts_with(cwd) { - cwd - } else { - &metadata.workspace_root - }); - let mount_root: PathBuf; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. - mount_root = wslpath(&host_root, verbose)?; + // Only specifying the environment variable name in the "-e" + // flag forwards the value from the parent shell + docker.args(&["-e", var]); } - #[cfg(not(target_os = "windows"))] - { - mount_root = mount_finder.find_mount_path(host_root.clone()); + + let runner = config.runner(target)?; + let cross_runner = format!("CROSS_RUNNER={}", runner.unwrap_or_default()); + docker + .args(&["-e", "PKG_CONFIG_ALLOW_CROSS=1"]) + .args(&["-e", "XARGO_HOME=/xargo"]) + .args(&["-e", "CARGO_HOME=/cargo"]) + .args(&["-e", "CARGO_TARGET_DIR=/target"]) + .args(&["-e", &cross_runner]); + + if let Some(username) = id::username().unwrap() { + docker.args(&["-e", &format!("USER={username}")]); } - let mount_cwd: PathBuf; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. - mount_cwd = wslpath(cwd, verbose)?; + + if let Ok(value) = env::var("QEMU_STRACE") { + docker.args(&["-e", &format!("QEMU_STRACE={value}")]); } - #[cfg(not(target_os = "windows"))] - { - mount_cwd = mount_finder.find_mount_path(cwd); + + if let Ok(value) = env::var("CROSS_DEBUG") { + docker.args(&["-e", &format!("CROSS_DEBUG={value}")]); } - let sysroot = mount_finder.find_mount_path(sysroot); - let mut cmd = if uses_xargo { - SafeCommand::new("xargo") - } else { - SafeCommand::new("cargo") + if let Ok(value) = env::var("CROSS_CONTAINER_OPTS") { + if env::var("DOCKER_OPTS").is_ok() { + eprintln!("Warning: using both `CROSS_CONTAINER_OPTS` and `DOCKER_OPTS`."); + } + docker.args(&parse_docker_opts(&value)?); + } else if let Ok(value) = env::var("DOCKER_OPTS") { + // FIXME: remove this when we deprecate DOCKER_OPTS. 
+ docker.args(&parse_docker_opts(&value)?); }; - cmd.args(args); - - let runner = config.runner(target)?; + Ok(()) +} - let mut docker = docker_command(&engine, "run")?; +pub(crate) fn docker_cwd( + docker: &mut Command, + metadata: &CargoMetadata, + dirs: &Directories, + cwd: &Path, + mount_volumes: bool, +) -> Result<()> { + if mount_volumes { + docker.args(&["-w".as_ref(), dirs.mount_cwd.as_os_str()]); + } else if dirs.mount_cwd == metadata.workspace_root { + docker.args(&["-w", "/project"]); + } else { + // We do this to avoid clashes with path separators. Windows uses `\` as a path separator on Path::join + let cwd = &cwd; + let working_dir = Path::new("project").join(cwd.strip_prefix(&metadata.workspace_root)?); + // No [T].join for OsStr + let mut mount_wd = std::ffi::OsString::new(); + for part in working_dir.iter() { + mount_wd.push("/"); + mount_wd.push(part); + } + docker.args(&["-w".as_ref(), mount_wd.as_os_str()]); + } - for ref var in config.env_passthrough(target)? { - validate_env_var(var)?; + Ok(()) +} - // Only specifying the environment variable name in the "-e" - // flag forwards the value from the parent shell - docker.args(&["-e", var]); - } +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub(crate) fn docker_mount( + docker: &mut Command, + metadata: &CargoMetadata, + config: &Config, + target: &Target, + cwd: &Path, + verbose: bool, + mount_cb: impl Fn(&mut Command, &Path, bool) -> Result, + mut store_cb: impl FnMut((String, PathBuf)), +) -> Result { let mut mount_volumes = false; // FIXME(emilgardis 2022-04-07): This is a fallback so that if it's hard for us to do mounting logic, make it simple(r) // Preferably we would not have to do this. @@ -223,21 +285,79 @@ pub fn run( }; if let Ok(val) = value { - let mount_path = mount(&mut docker, val.as_ref(), verbose)?; + let mount_path = mount_cb(docker, val.as_ref(), verbose)?; docker.args(&["-e", &format!("{}={}", var, mount_path.display())]); + store_cb((val, mount_path)); mount_volumes = true; } } for path in metadata.path_dependencies() { - mount(&mut docker, path, verbose)?; + let mount_path = mount_cb(docker, path, verbose)?; + store_cb((path.display().to_string(), mount_path)); mount_volumes = true; } - docker.args(&["-e", "PKG_CONFIG_ALLOW_CROSS=1"]); + Ok(mount_volumes) +} - docker.arg("--rm"); +#[cfg(target_os = "windows")] +fn wslpath(path: &Path, verbose: bool) -> Result { + let wslpath = which::which("wsl.exe") + .map_err(|_| eyre::eyre!("could not find wsl.exe")) + .warning("usage of `env.volumes` requires WSL on Windows") + .suggestion("is WSL installed on the host?")?; + Command::new(wslpath) + .arg("-e") + .arg("wslpath") + .arg("-a") + .arg(path) + .run_and_get_stdout(verbose) + .wrap_err_with(|| { + format!( + "could not get linux compatible path for `{}`", + path.display() + ) + }) + .map(|s| s.trim().into()) +} + +#[allow(unused_variables)] +pub(crate) fn canonicalize_mount_path(path: &Path, verbose: bool) -> Result { + #[cfg(target_os = "windows")] + { + // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. 
+ wslpath(path, verbose) + } + #[cfg(not(target_os = "windows"))] + { + Ok(path.to_path_buf()) + } +} + +pub(crate) fn user_id() -> String { + env::var("CROSS_CONTAINER_UID").unwrap_or_else(|_| id::user().to_string()) +} + +pub(crate) fn group_id() -> String { + env::var("CROSS_CONTAINER_GID").unwrap_or_else(|_| id::group().to_string()) +} + +pub(crate) fn docker_user_id(docker: &mut Command, engine_type: EngineType) { + // We need to specify the user for Docker, but not for Podman. + if engine_type == EngineType::Docker { + docker.args(&["--user", &format!("{}:{}", user_id(), group_id(),)]); + } +} + +#[allow(unused_variables)] +pub(crate) fn docker_seccomp( + docker: &mut Command, + engine_type: EngineType, + target: &Target, + verbose: bool, +) -> Result<()> { // docker uses seccomp now on all installations if target.needs_docker_seccomp() { let seccomp = if engine_type == EngineType::Docker && cfg!(target_os = "windows") { @@ -257,7 +377,7 @@ pub fn run( write_file(&path, false)?.write_all(SECCOMP.as_bytes())?; } #[cfg(target_os = "windows")] - if engine_type == EngineType::Podman { + if matches!(engine_type, EngineType::Podman | EngineType::PodmanRemote) { // podman weirdly expects a WSL path here, and fails otherwise path = wslpath(&path, verbose)?; } @@ -267,113 +387,16 @@ pub fn run( docker.args(&["--security-opt", &format!("seccomp={}", seccomp)]); } - // We need to specify the user for Docker, but not for Podman. - if engine_type == EngineType::Docker { - docker.args(&[ - "--user", - &format!( - "{}:{}", - env::var("CROSS_CONTAINER_UID").unwrap_or_else(|_| id::user().to_string()), - env::var("CROSS_CONTAINER_GID").unwrap_or_else(|_| id::group().to_string()), - ), - ]); - } - - docker - .args(&["-e", "XARGO_HOME=/xargo"]) - .args(&["-e", "CARGO_HOME=/cargo"]) - .args(&["-e", "CARGO_TARGET_DIR=/target"]); - - if let Some(username) = id::username().unwrap() { - docker.args(&["-e", &format!("USER={username}")]); - } - - if let Ok(value) = env::var("QEMU_STRACE") { - docker.args(&["-e", &format!("QEMU_STRACE={value}")]); - } - - if let Ok(value) = env::var("CROSS_DEBUG") { - docker.args(&["-e", &format!("CROSS_DEBUG={value}")]); - } - - if let Ok(value) = env::var("CROSS_CONTAINER_OPTS") { - if env::var("DOCKER_OPTS").is_ok() { - eprintln!("Warning: using both `CROSS_CONTAINER_OPTS` and `DOCKER_OPTS`."); - } - docker.args(&parse_docker_opts(&value)?); - } else if let Ok(value) = env::var("DOCKER_OPTS") { - // FIXME: remove this when we deprecate DOCKER_OPTS. - docker.args(&parse_docker_opts(&value)?); - }; - - docker - .args(&[ - "-e", - &format!("CROSS_RUNNER={}", runner.unwrap_or_default()), - ]) - .args(&["-v", &format!("{}:/xargo:Z", xargo_dir.display())]) - .args(&["-v", &format!("{}:/cargo:Z", cargo_dir.display())]) - // Prevent `bin` from being mounted inside the Docker container. - .args(&["-v", "/cargo/bin"]); - if mount_volumes { - docker.args(&[ - "-v", - &format!("{}:{}:Z", host_root.display(), mount_root.display()), - ]); - } else { - docker.args(&["-v", &format!("{}:/project:Z", host_root.display())]); - } - docker - .args(&["-v", &format!("{}:/rust:Z,ro", sysroot.display())]) - .args(&["-v", &format!("{}:/target:Z", target_dir.display())]); - - if mount_volumes { - docker.args(&["-w".as_ref(), mount_cwd.as_os_str()]); - } else if mount_cwd == metadata.workspace_root { - docker.args(&["-w", "/project"]); - } else { - // We do this to avoid clashes with path separators. 
Windows uses `\` as a path separator on Path::join - let cwd = &cwd; - let working_dir = Path::new("project").join(cwd.strip_prefix(&metadata.workspace_root)?); - // No [T].join for OsStr - let mut mount_wd = std::ffi::OsString::new(); - for part in working_dir.iter() { - mount_wd.push("/"); - mount_wd.push(part); - } - docker.args(&["-w".as_ref(), mount_wd.as_os_str()]); - } - - // When running inside NixOS or using Nix packaging we need to add the Nix - // Store to the running container so it can load the needed binaries. - if let Some(nix_store) = nix_store_dir { - docker.args(&[ - "-v", - &format!("{}:{}:Z", nix_store.display(), nix_store.display()), - ]); - } - - if atty::is(Stream::Stdin) { - docker.arg("-i"); - if atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { - docker.arg("-t"); - } - } - - docker - .arg(&image(config, target)?) - .args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]) - .run_and_get_status(verbose) - .map_err(Into::into) + Ok(()) } -pub fn image(config: &Config, target: &Target) -> Result { +pub(crate) fn image(config: &Config, target: &Target) -> Result { if let Some(image) = config.image(target)? { return Ok(image); } if !DOCKER_IMAGES.contains(&target.triple()) { - bail!( + eyre::bail!( "`cross` does not provide a Docker image for target {target}, \ specify a custom image in `Cross.toml`." ); @@ -388,33 +411,11 @@ pub fn image(config: &Config, target: &Target) -> Result { Ok(format!("{CROSS_IMAGE}/{target}:{version}")) } -#[cfg(target_os = "windows")] -fn wslpath(path: &Path, verbose: bool) -> Result { - let wslpath = which::which("wsl.exe") - .map_err(|_| eyre::eyre!("could not find wsl.exe")) - .warning("usage of `env.volumes` requires WSL on Windows") - .suggestion("is WSL installed on the host?")?; - - Command::new(wslpath) - .arg("-e") - .arg("wslpath") - .arg("-a") - .arg(path) - .run_and_get_stdout(verbose) - .wrap_err_with(|| { - format!( - "could not get linux compatible path for `{}`", - path.display() - ) - }) - .map(|s| s.trim().into()) -} - -fn docker_read_mount_paths(engine: &Path) -> Result> { +fn docker_read_mount_paths(engine: &Engine) -> Result> { let hostname = env::var("HOSTNAME").wrap_err("HOSTNAME environment variable not found")?; let mut docker: Command = { - let mut command = docker_command(engine, "inspect")?; + let mut command = docker_subcommand(engine, "inspect"); command.arg(hostname); command }; diff --git a/src/file.rs b/src/file.rs index 55468df81..ea49a3434 100644 --- a/src/file.rs +++ b/src/file.rs @@ -22,6 +22,7 @@ fn read_(path: &Path) -> Result { pub fn canonicalize(path: impl AsRef) -> Result { _canonicalize(path.as_ref()) + .wrap_err_with(|| format!("when canonicalizing path `{:?}`", path.as_ref())) } fn _canonicalize(path: &Path) -> Result { diff --git a/src/lib.rs b/src/lib.rs index 085f3da7a..ba88b0fdf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,15 +39,14 @@ use config::Config; use rustc_version::Channel; use serde::Deserialize; -pub use self::cargo::{cargo_metadata_with_args, CargoMetadata, Subcommand}; use self::cross_toml::CrossToml; use self::errors::Context; -use self::rustc::{TargetList, VersionMetaExt}; -pub use self::docker::get_container_engine; -pub use self::docker::CROSS_IMAGE; +pub use self::cargo::{cargo_metadata_with_args, CargoMetadata, Subcommand}; +pub use self::docker::*; pub use self::errors::{install_panic_hook, Result}; -pub use self::extensions::{CommandExt, OutputExt}; +pub use self::extensions::*; +pub use self::rustc::{target_list, version_meta, TargetList, 
VersionMetaExt};
 
 #[allow(non_camel_case_types)]
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -246,7 +245,7 @@ impl std::fmt::Display for Target {
 }
 
 impl Target {
-    fn from(triple: &str, target_list: &TargetList) -> Target {
+    pub fn from(triple: &str, target_list: &TargetList) -> Target {
         if target_list.contains(triple) {
             Target::new_built_in(triple)
         } else {
@@ -277,6 +276,32 @@ impl From<&str> for Target {
     }
 }
 
+pub fn get_sysroot(
+    host: &Host,
+    target: &Target,
+    channel: Option<&str>,
+    verbose: bool,
+) -> Result<(String, PathBuf)> {
+    let mut sysroot = rustc::sysroot(host, target, verbose)?;
+    let default_toolchain = sysroot
+        .file_name()
+        .and_then(|file_name| file_name.to_str())
+        .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?;
+    let toolchain = if let Some(channel) = channel {
+        [channel]
+            .iter()
+            .cloned()
+            .chain(default_toolchain.splitn(2, '-').skip(1))
+            .collect::<Vec<_>>()
+            .join("-")
+    } else {
+        default_toolchain.to_string()
+    };
+    sysroot.set_file_name(&toolchain);
+
+    Ok((toolchain, sysroot))
+}
+
 pub fn run() -> Result<ExitStatus> {
     let target_list = rustc::target_list(false)?;
     let args = cli::parse(&target_list)?;
@@ -293,8 +318,7 @@ pub fn run() -> Result<ExitStatus> {
         .iter()
         .any(|a| a == "--verbose" || a == "-v" || a == "-vv");
 
-    let host_version_meta =
-        rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version")?;
+    let host_version_meta = rustc::version_meta()?;
     let cwd = std::env::current_dir()?;
     if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), verbose)? {
         let host = host_version_meta.host();
@@ -315,22 +339,8 @@ pub fn run() -> Result<ExitStatus> {
         };
 
         if image_exists && host.is_supported(Some(&target)) {
-            let mut sysroot = rustc::sysroot(&host, &target, verbose)?;
-            let default_toolchain = sysroot
-                .file_name()
-                .and_then(|file_name| file_name.to_str())
-                .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?;
-            let toolchain = if let Some(channel) = args.channel {
-                [channel]
-                    .iter()
-                    .map(|c| c.as_str())
-                    .chain(default_toolchain.splitn(2, '-').skip(1))
-                    .collect::<Vec<_>>()
-                    .join("-")
-            } else {
-                default_toolchain.to_string()
-            };
-            sysroot.set_file_name(&toolchain);
+            let (toolchain, sysroot) =
+                get_sysroot(&host, &target, args.channel.as_deref(), verbose)?;
             let mut is_nightly = toolchain.contains("nightly");
 
             let installed_toolchains = rustup::installed_toolchains(verbose)?;
@@ -429,10 +439,10 @@ pub fn run() -> Result<ExitStatus> {
                 && target.needs_interpreter()
                 && !interpreter::is_registered(&target)?
             {
-                docker::register(&target, verbose)?
+                container_register(&target, args.is_remote, verbose)?
             }
 
-            return docker::run(
+            return container_run(
                 &target,
                 &filtered_args,
                 &metadata,
@@ -441,6 +451,7 @@ pub fn run() -> Result<ExitStatus> {
                 &sysroot,
                 verbose,
                 args.docker_in_docker,
+                args.is_remote,
                 &cwd,
            );
         }
diff --git a/src/rustc.rs b/src/rustc.rs
index 7f8d33bae..ce840fdc2 100644
--- a/src/rustc.rs
+++ b/src/rustc.rs
@@ -57,3 +57,7 @@ pub fn sysroot(host: &Host, target: &Target, verbose: bool) -> Result<PathBuf> {
 
     Ok(PathBuf::from(stdout))
 }
+
+pub fn version_meta() -> Result<rustc_version::VersionMeta> {
+    rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version")
+}
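
The lib.rs hunk above factors the toolchain/sysroot detection into a new public `get_sysroot` helper and re-exports `target_list`, `version_meta`, and `Target::from`. A minimal sketch of how an external caller might combine these newly public items; the `sysroot_for` helper and the exact `cross::` re-export paths are assumptions for illustration, not part of this diff:

use cross::{get_sysroot, target_list, version_meta, Result, Target, VersionMetaExt};

// Resolve the toolchain name and sysroot path for a target triple such as
// "aarch64-unknown-linux-gnu". With channel = Some("nightly"), a default
// toolchain like "stable-x86_64-unknown-linux-gnu" becomes
// "nightly-x86_64-unknown-linux-gnu", mirroring the splicing in `get_sysroot`.
fn sysroot_for(triple: &str, channel: Option<&str>) -> Result<()> {
    let verbose = false;
    let targets = target_list(verbose)?;         // triples known to rustc
    let target = Target::from(triple, &targets); // `Target::from` is now `pub`
    let host = version_meta()?.host();           // host triple via `VersionMetaExt`
    let (toolchain, sysroot) = get_sysroot(&host, &target, channel, verbose)?;
    println!("{toolchain}: {}", sysroot.display());
    Ok(())
}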