Skip to content
This repository has been archived by the owner on Jul 11, 2023. It is now read-only.

Add hashmaps #215

Merged
merged 4 commits into from
Nov 11, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions examples/example-probes/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -73,3 +73,8 @@ required-features = ["probes"]
name = "tasks"
path = "src/tasks/main.rs"
required-features = ["probes", "kernel5_8"]

# BPF probe example demonstrating HashMap, PerCpuHashMap and LruHashMap.
[[bin]]
name = "hashmaps"
path = "src/hashmaps/main.rs"
required-features = ["probes"]
68 changes: 68 additions & 0 deletions examples/example-probes/src/hashmaps/main.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
//! This example shows the difference between `PerCpuHashMap` and `HashMap`.
//! The former is a per-cpu data structure, so users don't need to worry
//! about race conditions. The latter is a global data structure, so it is
//! subject to race conditions.
//!
//! `PerCpuArray` can be used instead of the BPF stack to hold temporary
//! values that exceed the maximum size of the BPF stack (512 bytes).
#![no_std]
#![no_main]
use example_probes::hashmaps::*;
use redbpf_probes::kprobe::prelude::*;

// Declares BPF program metadata: version value and "GPL" license string.
program!(0xFFFFFFFE, "GPL");

// Per-cpu scratch buffer used in place of the 512-byte BPF stack for values
// too big to fit there. When used this way its size must be exactly 1
// (checked by the BPF verifier — see sched_fork below).
#[map]
static mut ALT_STACK: PerCpuArray<BigStructure> = PerCpuArray::with_max_entries(1);

// LRU map holding large values, keyed by a random i8 (see sched_fork).
#[map]
static mut BIG_STRUCT: LruHashMap<i8, BigStructure> = LruHashMap::with_max_entries(16);

// Per-cpu allocation-size histogram: race-free by construction.
#[map]
static mut PCPU_MEM_ALLOC: PerCpuHashMap<usize, usize> = PerCpuHashMap::with_max_entries(16);

// Global allocation-size histogram: subject to races between CPUs.
#[map]
static mut MEM_ALLOC: HashMap<usize, usize> = HashMap::with_max_entries(16);

/// Kprobe on `sched_fork`: exercises `LruHashMap` with values that are
/// too large for the BPF stack.
#[kprobe]
unsafe fn sched_fork(_regs: Registers) {
    // Random 8-bit key; with only 16 entries the LRU map will keep
    // evicting old entries as new keys arrive.
    let rnd_key = (bpf_get_prandom_u32() & 0xff) as i8;
    if let Some(bigstruct) = BIG_STRUCT.get_mut(&rnd_key) {
        // Entry already present: mutate it in place and write it back.
        bigstruct.f2[99] = 99;
        BIG_STRUCT.set(&rnd_key, bigstruct);
    } else {
        // The maximum size of the BPF stack is 512 bytes, but BigStructure
        // is 808 bytes, so it cannot be located on the stack. Use the
        // per-cpu array as scratch space to hold the temporary
        // BigStructure value. Note that when a per-cpu array is used for
        // this purpose, its size must be 1. This is checked by the
        // BPF verifier.
        let bigstruct = ALT_STACK.get_mut(0).unwrap();
        for x in 0..=99 {
            bigstruct.f2[x] = x;
        }

        BIG_STRUCT.set(&rnd_key, bigstruct);
    }
}

/// Kprobe on `__kmalloc`: counts allocations bucketed by size.
///
/// The requested size is bucketed into power-of-two upper bounds
/// (2^x - 1 for x in 1..=12); larger sizes fall into the catch-all
/// `9999` bucket. Each call increments the bucket counter in both a
/// per-cpu hashmap (race-free) and a global hashmap (racy) so that
/// userspace can compare the two.
#[kprobe]
unsafe fn __kmalloc(regs: Registers) {
    let mut size = regs.parm1() as usize;
    let mut max: usize = 9999;
    for x in 1..=12 {
        size >>= 1;
        if size == 0 {
            // The size fits below 2^x, so its bucket bound is 2^x - 1.
            max = usize::pow(2, x) - 1;
            break;
        }
    }
    // Update the two maps independently, so that the first sighting of a
    // bucket on *this* CPU cannot reset the global count already
    // accumulated by other CPUs (and a missing global entry cannot
    // cause a panic via unwrap).
    if let Some(count) = PCPU_MEM_ALLOC.get_mut(&max) {
        *count += 1;
    } else {
        PCPU_MEM_ALLOC.set(&max, &1);
    }
    // Non-atomic read-modify-write on a shared map: concurrent CPUs can
    // lose updates here — the race this example demonstrates.
    if let Some(count) = MEM_ALLOC.get_mut(&max) {
        *count += 1;
    } else {
        MEM_ALLOC.set(&max, &1);
    }
}
15 changes: 15 additions & 0 deletions examples/example-probes/src/hashmaps/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/// A value deliberately larger (808 bytes on 64-bit targets) than the
/// 512-byte BPF stack, used to demonstrate storing big values in BPF maps.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct BigStructure {
    pub f1: usize,
    pub f2: [usize; 100],
}

impl Default for BigStructure {
    /// Builds a zero-initialized instance. Written by hand because
    /// `Default` is not derivable for 100-element arrays.
    fn default() -> Self {
        Self {
            f1: usize::default(),
            f2: [usize::default(); 100],
        }
    }
}
3 changes: 2 additions & 1 deletion examples/example-probes/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@
pub mod bindings;

pub mod echo;
pub mod hashmaps;
pub mod mallocstacks;
pub mod tasks;
pub mod tcp_lifetime;
pub mod vfsreadlat;
pub mod tasks;
77 changes: 77 additions & 0 deletions examples/example-userspace/examples/hashmaps.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
//! This example shows usage of HashMap, PerCpuHashMap and LruHashMap. It
//! also demonstrates that HashMap suffers from race conditions. Consider
//! PerCpuHashMap if your program needs to store accurate map data.

use libc;
use std::process;
use std::time::Duration;
use tokio::{signal::ctrl_c, time::sleep};
use tracing::{error, subscriber, Level};
use tracing_subscriber::FmtSubscriber;

use probes::hashmaps::BigStructure;
use redbpf::{load::Loader, HashMap, LruHashMap, PerCpuHashMap};

/// Loads the `hashmaps` BPF probes, attaches each kprobe to the kernel
/// function of the same name, then once per second prints the
/// allocation-size histogram kept in both a global `HashMap` and a
/// `PerCpuHashMap`, highlighting discrepancies caused by races on the
/// global map. On Ctrl-C it dumps the `BigStructure` entries from the
/// LRU map and exits.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    let subscriber = FmtSubscriber::builder()
        .with_max_level(Level::TRACE)
        .finish();
    subscriber::set_global_default(subscriber).unwrap();
    // Loading eBPF programs requires root privileges.
    if unsafe { libc::geteuid() != 0 } {
        error!("You must be root to use eBPF!");
        process::exit(1);
    }

    let mut loaded = Loader::load(probe_code()).expect("error loading probe");
    for kp in loaded.kprobes_mut() {
        // Attach each probe to the kernel function of the same name.
        // `unwrap_or_else` avoids building the panic message eagerly on
        // the success path (clippy::expect_fun_call).
        let name = kp.name();
        kp.attach_kprobe(name.as_str(), 0)
            .unwrap_or_else(|e| panic!("error on attach_kprobe to {}: {:?}", name, e));
    }

    let big_struct =
        LruHashMap::<i8, BigStructure>::new(loaded.map("BIG_STRUCT").expect("map not found"))
            .expect("error on LruHashMap::new");
    let pcpu_mem_alloc =
        PerCpuHashMap::<usize, usize>::new(loaded.map("PCPU_MEM_ALLOC").expect("map not found"))
            .expect("error on PerCpuHashMap::new");
    let mem_alloc = HashMap::<usize, usize>::new(loaded.map("MEM_ALLOC").expect("map not found"))
        .expect("error on HashMap::new");
    println!("Hit Ctrl-C to quit");
    loop {
        tokio::select! {
            _ = sleep(Duration::from_secs(1)) => {}
            _ = ctrl_c() => break
        }

        let mut alloc_stats = mem_alloc.iter().collect::<Vec<(usize, usize)>>();
        alloc_stats.sort();
        println!("[allocation size upto XXX bytes] => [number of __kmalloc call]");

        for (size, total_cnt) in alloc_stats {
            // Summing the per-cpu values gives the exact count; the global
            // hashmap total may differ because of lost updates between CPUs.
            let pcpu_vals = pcpu_mem_alloc.get(size).unwrap();
            let exact_cnt: usize = pcpu_vals.iter().sum();
            if total_cnt != exact_cnt {
                println!(
                    "{} => {} != {} (hashmap != pcpu hashmap)",
                    size, total_cnt, exact_cnt
                );
            } else {
                println!("{} => {}", size, total_cnt);
            }
        }
    }

    println!();
    println!("iterate over big structures!");
    for (_, bigstruct) in big_struct.iter() {
        println!("{:?}", bigstruct);
    }
}

/// Returns the compiled BPF ELF object for the `hashmaps` probes,
/// embedded into this binary at build time from cargo's OUT_DIR.
fn probe_code() -> &'static [u8] {
    include_bytes!(concat!(
        env!("OUT_DIR"),
        "/target/bpf/programs/hashmaps/hashmaps.elf"
    ))
}
89 changes: 82 additions & 7 deletions redbpf-probes/src/maps.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ kernel and user-space code.
use core::convert::TryInto;
use core::default::Default;
use core::marker::PhantomData;
use core::mem;
use core::{mem, ptr};
use cty::*;

use crate::bindings::*;
Expand Down Expand Up @@ -47,6 +47,15 @@ macro_rules! define_hashmap {
}
}
/// Returns a reference to the value corresponding to the key.
///
/// **CAUTION** The value that the returned reference refers to is
/// stored in 8-byte-aligned memory, so the reference is not
/// guaranteed to be properly aligned if the alignment requirement
/// of the value exceeds 8 bytes. Do not call this method if the
/// alignment is greater than 8 bytes.
///
/// Use the `get_val` method instead if the alignment of the value
/// is greater than 8 bytes.
#[inline]
pub fn get(&mut self, key: &K) -> Option<&V> {
unsafe {
Expand All @@ -62,6 +71,17 @@ macro_rules! define_hashmap {
}
}

/// Returns a mutable reference to the value corresponding to the key.
///
/// **CAUTION** The value that the returned mutable reference
/// refers to is stored in 8-byte-aligned memory, so the mutable
/// reference is not guaranteed to be properly aligned if the
/// alignment requirement of the value exceeds 8 bytes. Do not
/// call this method if the alignment is greater than 8 bytes.
///
/// Use the `get_val` method instead if the alignment of the value
/// is greater than 8 bytes; in that case, call the `set` method to
/// write the modified value back to the BPF map.
#[inline]
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
unsafe {
Expand All @@ -77,6 +97,30 @@ macro_rules! define_hashmap {
}
}

/// Returns a copy of the value corresponding to the key.
///
/// **NOTE** Prefer the more efficient `get_mut` method if the
/// alignment of the value is 8 bytes or less (i.e. 8, 4, 2 bytes
/// or 1 byte). The Rust compiler assumes that the target of a
/// reference is properly aligned, but the Linux kernel only stores
/// map values at 8-byte-aligned memory, so for types with a larger
/// alignment requirement this method copies the value out with an
/// unaligned read instead of handing out a reference.
#[inline]
pub fn get_val(&mut self, key: &K) -> Option<V> {
    unsafe {
        let value = bpf_map_lookup_elem(
            &mut self.def as *mut _ as *mut c_void,
            key as *const _ as *const c_void,
        );
        if value.is_null() {
            // No entry for this key.
            None
        } else {
            // Copy out with an unaligned read: the kernel guarantees
            // only 8-byte alignment for stored map values.
            Some(ptr::read_unaligned(value as *const V))
        }
    }
}

/// Set the `value` in the map for `key`
#[inline]
pub fn set(&mut self, key: &K, value: &V) {
Expand Down Expand Up @@ -176,17 +220,48 @@ macro_rules! define_array {
};
}
define_hashmap!(
/// Hash table map.
/// Hash table map
///
/// High level API for BPF_MAP_TYPE_HASH maps.
/// High level API of BPF_MAP_TYPE_HASH maps for BPF programs.
///
/// For userspace API, see [`redbpf::HashMap`](../../redbpf/struct.HashMap.html)
/// If you are looking for userspace API, see
/// [`redbpf::HashMap`](../../redbpf/struct.HashMap.html) instead.
HashMap,
bpf_map_type_BPF_MAP_TYPE_HASH
);
// define_hashmap!(PerCpuHashMap, bpf_map_type_BPF_MAP_TYPE_PERCPU_HASH); // userspace part is not implemented yet
// define_hashmap!(LruHashMap, bpf_map_type_BPF_MAP_TYPE_LRU_HASH); // userspace part is not implemented yet
// define_hashmap!(LruPerCpuHashMap, bpf_map_type_BPF_MAP_TYPE_LRU_PERCPU_HASH); // userspace part is not implemented yet
define_hashmap!(
/// Per-cpu hash table map
///
/// High level API of BPF_MAP_TYPE_PERCPU_HASH maps for BPF programs.
///
/// If you are looking for userspace API, see
/// [`redbpf::PerCpuHashMap`](../../redbpf/struct.PerCpuHashMap.html)
/// instead.
PerCpuHashMap,
bpf_map_type_BPF_MAP_TYPE_PERCPU_HASH
);
define_hashmap!(
/// LRU hash table map
///
/// High level API of BPF_MAP_TYPE_LRU_HASH maps for BPF programs.
///
/// If you are looking for userspace API, see
/// [`redbpf::LruHashMap`](../../redbpf/struct.LruHashMap.html) instead.
LruHashMap,
bpf_map_type_BPF_MAP_TYPE_LRU_HASH
);
define_hashmap!(
/// LRU per-cpu hash table map
///
/// High level API of BPF_MAP_TYPE_LRU_PERCPU_HASH maps for BPF programs.
///
/// If you are looking for userspace API, see
/// [`redbpf::LruPerCpuHashMap`](../../redbpf/struct.LruPerCpuHashMap.html)
/// instead.
LruPerCpuHashMap,
bpf_map_type_BPF_MAP_TYPE_LRU_PERCPU_HASH
);

define_array!(
/// BPF array map for BPF programs
///
Expand Down
Loading