Skip to content

Commit

Permalink
feat(process-monitor): Detect new containers
Browse files Browse the repository at this point in the history
Enhance the process monitor with the ability to detect when a
container runtime creates a new PID namespace, which we can treat
as the creation of a new container.

Achieve that by:

* Registering the inodes of container runtime binaries we want to
  track in the user-space, saving them in a BPF map.
* In BPF, every time a process is executed using the runtime
  binary, checking whether the PID namespace has changed.
  • Loading branch information
vadorovsky committed Oct 30, 2023
1 parent 57fb058 commit 11e1aab
Show file tree
Hide file tree
Showing 13 changed files with 528 additions and 46 deletions.
6 changes: 6 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions crates/bpf-common/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,17 @@ procfs = { workspace = true }
libc = { workspace = true }
glob = { workspace = true }
hex = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
lazy_static = { workspace = true }
regex = { workspace = true }

# Test deps
which = { workspace = true, optional = true }
cgroups-rs = { workspace = true, optional = true }
rand = { workspace = true, optional = true }

validatron = { workspace = true }

[build-dependencies]
bpf-builder = { workspace = true }
146 changes: 146 additions & 0 deletions crates/bpf-common/src/parsing/containers.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
use std::{
fs::File,
io::{self, BufReader},
process::Command,
};

use serde::{Deserialize, Serialize};
use thiserror::Error;
use validatron::Validatron;

/// Errors that can occur while resolving container metadata from Docker
/// or libpod/Podman.
#[derive(Error, Debug)]
pub enum ContainerError {
    /// Opening or reading a container config file failed.
    #[error("reading file {path} failed")]
    ReadFile {
        #[source]
        source: io::Error,
        path: String,
    },
    /// Deserializing the JSON container configuration failed. `path` is the
    /// file path or, for Podman, the command line that produced the JSON.
    #[error("parsing config from `{path}` failed")]
    ParseConfig {
        #[source]
        source: serde_json::error::Error,
        path: String,
    },
    /// Spawning an external command (e.g. `podman`) failed.
    #[error("executing {command} failed")]
    Exec {
        #[source]
        source: io::Error,
        command: String,
    },
    /// An external command ran but exited with a non-success status.
    #[error("executing {command} failed with status {code:?}")]
    ExecStatus { command: String, code: Option<i32> },
    /// An image digest string did not have the expected format.
    // NOTE(review): not constructed in this file — presumably used by callers; confirm.
    #[error("parsing image digest {digest} failed")]
    ParseDigest { digest: String },
    /// An image digest referenced an unsupported hash function.
    // NOTE(review): not constructed in this file — presumably used by callers; confirm.
    #[error("invalid hash function {hash_fn}")]
    InvalidHashFunction { hash_fn: String },
}

/// Identifier of a container, tagged by the runtime that created it.
/// The inner `String` is the container ID as it appears in cgroup paths.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ContainerId {
    /// Container managed by Docker.
    Docker(String),
    /// Container managed by libpod/Podman.
    Libpod(String),
}

/// Subset of Docker's `config.v2.json` container configuration needed to
/// build a `ContainerInfo`.
#[derive(Debug, Deserialize)]
struct DockerConfig {
    // Nested `Config` object holding the image name.
    #[serde(rename = "Config")]
    config: DockerContainerConfig,
    // Top-level `Image` field: the image digest.
    #[serde(rename = "Image")]
    image_digest: String,
    // Container name; may carry a leading `/` (stripped in `get_container_info`).
    #[serde(rename = "Name")]
    name: String,
}

/// The nested `Config` object inside Docker's `config.v2.json`.
#[derive(Debug, Deserialize)]
struct DockerContainerConfig {
    // Name of the image the container was created from.
    #[serde(rename = "Image")]
    image: String,
}

/// Subset of the JSON emitted by `podman inspect --type=container` needed
/// to build a `ContainerInfo`.
#[derive(Debug, Deserialize)]
struct LibpodConfig {
    #[serde(rename = "Name")]
    name: String,
    #[serde(rename = "Image")]
    image: String,
    #[serde(rename = "ImageDigest")]
    image_digest: String,
}

/// Information about a container, resolved from its runtime's metadata.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Validatron)]
pub struct ContainerInfo {
    /// Container ID (hex string, as found in cgroup paths).
    pub id: String,
    /// Human-readable container name (no leading `/`).
    pub name: String,
    /// Name of the image the container was created from.
    pub image: String,
    /// Digest of that image.
    pub image_digest: String,
}

/// Resolves [`ContainerInfo`] for the given container ID.
///
/// For Docker containers, metadata is read directly from
/// `/var/lib/docker/containers/<id>/config.v2.json`. For libpod/Podman
/// containers, it is obtained by running `podman inspect`.
///
/// # Errors
///
/// Returns a [`ContainerError`] when the config file cannot be read, the
/// external command cannot be executed or exits unsuccessfully, or the
/// JSON cannot be parsed.
pub fn get_container_info(id: ContainerId) -> Result<ContainerInfo, ContainerError> {
    match id {
        ContainerId::Docker(id) => {
            let path = format!("/var/lib/docker/containers/{}/config.v2.json", id);
            let file = File::open(&path).map_err(|source| ContainerError::ReadFile {
                source,
                path: path.clone(),
            })?;
            let config: DockerConfig = serde_json::from_reader(BufReader::new(file))
                .map_err(|source| ContainerError::ParseConfig { source, path })?;

            // Docker stores the container name with a leading slash; drop it.
            let mut name = config.name;
            if name.starts_with('/') {
                name.remove(0);
            }

            Ok(ContainerInfo {
                id,
                name,
                image: config.config.image,
                image_digest: config.image_digest,
            })
        }
        ContainerId::Libpod(id) => {
            let output = Command::new("podman")
                .arg("inspect")
                .arg("--type=container")
                .arg(&id)
                .output()
                .map_err(|source| ContainerError::Exec {
                    source,
                    command: "podman".to_owned(),
                })?;
            if !output.status.success() {
                return Err(ContainerError::ExecStatus {
                    command: "podman".to_owned(),
                    code: output.status.code(),
                });
            }

            // NOTE(review): `podman inspect` typically prints a JSON *array*;
            // confirm that deserializing straight into `LibpodConfig` (rather
            // than `Vec<LibpodConfig>`) succeeds against a real podman.
            let config: LibpodConfig = serde_json::from_slice(&output.stdout)
                .map_err(|source| ContainerError::ParseConfig {
                    source,
                    path: format!("podman inspect --type=container {id}"),
                })?;

            Ok(ContainerInfo {
                id,
                name: config.name,
                image: config.image,
                image_digest: config.image_digest,
            })
        }
    }
}
1 change: 1 addition & 0 deletions crates/bpf-common/src/parsing/mod.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
pub mod containers;
pub mod procfs;

mod buffer_index;
Expand Down
67 changes: 67 additions & 0 deletions crates/bpf-common/src/parsing/procfs.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,28 @@
//! Utility functions used to extract data from procfs

use glob::glob;
use lazy_static::lazy_static;
use nix::unistd::{Pid, Uid};
use regex::Regex;
use std::{
fs::{self, File},
io::{self, prelude::*, BufReader},
path::PathBuf,
};
use thiserror::Error;

use super::containers::ContainerId;

// Special value used to indicate openat should use the current working directory.
const AT_FDCWD: i32 = -100;

lazy_static! {
    /// Pattern for matching cgroups created by Docker.
    // NOTE(review): the `.` after `docker` is an unescaped regex wildcard —
    // it matches the `-` of systemd's `docker-<id>.scope` but also any other
    // single character; confirm this looseness is intentional.
    static ref RE_CGROUP_DOCKER: Regex = Regex::new(r"docker.(?P<id>[0-9a-f]+)(?:[^0-9a-f])").unwrap();
    /// Pattern for matching cgroups created by libpod/podman.
    // NOTE(review): same unescaped `.` as above; also, the trailing
    // `[^0-9a-f]` requires a delimiter character *after* the ID, so an ID at
    // the very end of a cgroup line will not match — confirm that is desired.
    static ref RE_CGROUP_LIBPOD: Regex = Regex::new(r"libpod.(?P<id>[0-9a-f]+)(?:[^0-9a-f])").unwrap();
}

#[derive(Error, Debug)]
pub enum ProcfsError {
#[error("reading link failed {path}")]
Expand Down Expand Up @@ -153,3 +164,59 @@ pub fn get_running_processes() -> Result<Vec<Pid>, ProcfsError> {
})
.collect()
}

/// Extracts a [`ContainerId`] from a single line of `/proc/<pid>/cgroup`.
///
/// Returns `Some` when the cgroup path matches the naming pattern used by
/// Docker or by libpod/Podman, `None` otherwise.
fn get_container_id_from_cgroup(cgroup_info: &str) -> Option<ContainerId> {
    // The `id` capture group always participates in a successful match, so
    // indexing into the captures cannot panic.
    RE_CGROUP_DOCKER
        .captures(cgroup_info)
        .map(|caps| ContainerId::Docker(caps["id"].to_string()))
        .or_else(|| {
            RE_CGROUP_LIBPOD
                .captures(cgroup_info)
                .map(|caps| ContainerId::Libpod(caps["id"].to_string()))
        })
}

/// Determines whether the process with the given PID runs inside a
/// container by scanning its `/proc/<pid>/cgroup` file.
///
/// Returns `Ok(Some(..))` with the container ID of the first matching
/// cgroup line, `Ok(None)` when no line matches.
///
/// # Errors
///
/// Returns [`ProcfsError::ReadFile`] when the cgroup file cannot be opened.
/// Unreadable individual lines are silently skipped.
pub fn get_process_container_id(pid: Pid) -> Result<Option<ContainerId>, ProcfsError> {
    let path = format!("/proc/{pid}/cgroup");
    let file = File::open(&path).map_err(|source| ProcfsError::ReadFile { source, path })?;

    let container_id = BufReader::new(file)
        .lines()
        .flatten()
        .find_map(|line| get_container_id_from_cgroup(&line));

    Ok(container_id)
}

#[cfg(test)]
mod test {
    use super::*;

    /// Checks that container IDs are recognized only in Docker/libpod
    /// cgroup paths and not in ordinary systemd scopes.
    #[test]
    fn test_get_container_id_from_cgroup() {
        // Plain init scope — not a container.
        let container_id = get_container_id_from_cgroup("0::/init.scope");
        assert_eq!(container_id, None);

        // Ordinary user application scope — not a container.
        let container_id = get_container_id_from_cgroup("0::/user.slice/user-1000.slice/user@1000.service/app.slice/app-gnome-Alacritty-3266.scope");
        assert_eq!(container_id, None);

        // Docker with the systemd cgroup driver: `docker-<id>.scope`.
        // (Fixture previously ended in `.scop`, a truncated `.scope`.)
        let container_id = get_container_id_from_cgroup("0::/system.slice/docker-14467e1a5a6da17b660a130932f1ab568f35586bac8bc5147987d9bba4da08de.scope");
        assert_eq!(
            container_id,
            Some(ContainerId::Docker(
                "14467e1a5a6da17b660a130932f1ab568f35586bac8bc5147987d9bba4da08de".to_owned()
            ))
        );

        // Rootless podman: `libpod-<id>.scope` under the user slice.
        let container_id = get_container_id_from_cgroup("0::/user.slice/user-1000.slice/user@1000.service/user.slice/libpod-3f084b4c7b789c1a0f174da3fcd339e31125d3096b3ff46a0bef4fad71d09362.scope/container");
        assert_eq!(
            container_id,
            Some(ContainerId::Libpod(
                "3f084b4c7b789c1a0f174da3fcd339e31125d3096b3ff46a0bef4fad71d09362".to_owned()
            ))
        );
    }
}
8 changes: 7 additions & 1 deletion crates/bpf-common/src/program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
//!
use core::fmt;
use std::{
collections::HashSet, convert::TryFrom, fmt::Display, mem::size_of, sync::Arc, time::Duration,
collections::HashSet, convert::TryFrom, fmt::Display, mem::size_of, path::PathBuf, sync::Arc,
time::Duration,
};

use aya::{
Expand Down Expand Up @@ -178,6 +179,11 @@ pub enum ProgramError {
BtfError(#[from] BtfError),
#[error("running background aya task {0}")]
JoinError(#[from] JoinError),
#[error("could not find the inode of {path}")]
InodeError {
path: PathBuf,
io_error: Box<std::io::Error>,
},
}

pub struct ProgramBuilder {
Expand Down
3 changes: 3 additions & 0 deletions crates/bpf-filtering/src/initializer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,18 +70,21 @@ pub async fn setup_events_filter(
}
for process in &process_tree {
initializer.update(process)?;

process_tracker.update(TrackerUpdate::Fork {
ppid: process.parent,
pid: process.pid,
timestamp: Timestamp::from(0),
namespaces: process.namespaces,
is_new_container: false,
});
process_tracker.update(TrackerUpdate::Exec {
pid: process.pid,
image: process.image.to_string(),
timestamp: Timestamp::from(0),
argv: Vec::new(),
namespaces: process.namespaces,
is_new_container: false,
});
}

Expand Down
1 change: 1 addition & 0 deletions crates/modules/process-monitor/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ nix = { workspace = true }
log = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
which = { workspace = true }

[build-dependencies]
bpf-builder = { workspace = true }
Loading

0 comments on commit 11e1aab

Please sign in to comment.