Skip to content

Commit

Permalink
selftests/bpf: Add selftests for load-acquire and store-release instructions
Browse files Browse the repository at this point in the history

Add the following ./test_progs tests:

  * atomics/load_acquire
  * atomics/store_release
  * arena_atomics/load_acquire
  * arena_atomics/store_release

They depend on the pre-defined __BPF_FEATURE_LOAD_ACQ_STORE_REL feature
macro, which implies -mcpu>=v4.

  $ ALLOWLIST=atomics/load_acquire,atomics/store_release,
  $ ALLOWLIST+=arena_atomics/load_acquire,arena_atomics/store_release

  $ ./test_progs-cpuv4 -a $ALLOWLIST

  #3/9     arena_atomics/load_acquire:OK
  #3/10    arena_atomics/store_release:OK
...
  #10/8    atomics/load_acquire:OK
  #10/9    atomics/store_release:OK

  $ ./test_progs -v -a $ALLOWLIST

  test_load_acquire:SKIP:Clang does not support BPF load-acquire or addr_space_cast
  #3/9     arena_atomics/load_acquire:SKIP
  test_store_release:SKIP:Clang does not support BPF store-release or addr_space_cast
  #3/10    arena_atomics/store_release:SKIP
...
  test_load_acquire:SKIP:Clang does not support BPF load-acquire
  #10/8    atomics/load_acquire:SKIP
  test_store_release:SKIP:Clang does not support BPF store-release
  #10/9    atomics/store_release:SKIP

Additionally, add several ./test_verifier tests:

  #65/u atomic BPF_LOAD_ACQ access through non-pointer  OK
  #65/p atomic BPF_LOAD_ACQ access through non-pointer  OK
  #66/u atomic BPF_STORE_REL access through non-pointer  OK
  #66/p atomic BPF_STORE_REL access through non-pointer  OK

  #67/u BPF_ATOMIC load-acquire, 8-bit OK
  #67/p BPF_ATOMIC load-acquire, 8-bit OK
  #68/u BPF_ATOMIC load-acquire, 16-bit OK
  #68/p BPF_ATOMIC load-acquire, 16-bit OK
  #69/u BPF_ATOMIC load-acquire, 32-bit OK
  #69/p BPF_ATOMIC load-acquire, 32-bit OK
  #70/u BPF_ATOMIC load-acquire, 64-bit OK
  #70/p BPF_ATOMIC load-acquire, 64-bit OK
  #71/u Cannot load-acquire from uninitialized src_reg OK
  #71/p Cannot load-acquire from uninitialized src_reg OK

  #76/u BPF_ATOMIC store-release, 8-bit OK
  #76/p BPF_ATOMIC store-release, 8-bit OK
  #77/u BPF_ATOMIC store-release, 16-bit OK
  #77/p BPF_ATOMIC store-release, 16-bit OK
  #78/u BPF_ATOMIC store-release, 32-bit OK
  #78/p BPF_ATOMIC store-release, 32-bit OK
  #79/u BPF_ATOMIC store-release, 64-bit OK
  #79/p BPF_ATOMIC store-release, 64-bit OK
  #80/u Cannot store-release from uninitialized src_reg OK
  #80/p Cannot store-release from uninitialized src_reg OK

Reviewed-by: Josh Don <joshdon@google.com>
Signed-off-by: Peilin Ye <yepeilin@google.com>
  • Loading branch information
peilin-ye authored and Kernel Patches Daemon committed Dec 21, 2024
1 parent f95d7d1 commit ab3e6a2
Show file tree
Hide file tree
Showing 8 changed files with 393 additions and 18 deletions.
2 changes: 2 additions & 0 deletions include/linux/filter.h
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,8 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
* BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
* BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
* BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
* BPF_LOAD_ACQ dst_reg = smp_load_acquire(src_reg + off16)
* BPF_STORE_REL smp_store_release(dst_reg + off16, src_reg)
*/

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
Expand Down
61 changes: 60 additions & 1 deletion tools/testing/selftests/bpf/prog_tests/arena_atomics.c
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,60 @@ static void test_uaf(struct arena_atomics *skel)
ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
}

/* Run the arena load_acquire BPF program once and verify that every
 * access width (8/16/32/64 bits) observed the expected seed value.
 * Skipped when Clang cannot emit load-acquire or addr_space_cast.
 */
static void test_load_acquire(struct arena_atomics *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int fd, ret;

	if (skel->data->skip_lacq_srel_tests) {
		printf("%s:SKIP:Clang does not support BPF load-acquire or addr_space_cast\n",
		       __func__);
		test__skip();
		return;
	}

	/* Run the program directly; attaching it is unnecessary. */
	fd = bpf_program__fd(skel->progs.load_acquire);
	ret = bpf_prog_test_run_opts(fd, &topts);
	if (!ASSERT_OK(ret, "test_run_opts err") ||
	    !ASSERT_OK(topts.retval, "test_run_opts retval"))
		return;

	ASSERT_EQ(skel->arena->load_acquire8_result, 0x12, "load_acquire8_result");
	ASSERT_EQ(skel->arena->load_acquire16_result, 0x1234, "load_acquire16_result");
	ASSERT_EQ(skel->arena->load_acquire32_result, 0x12345678, "load_acquire32_result");
	ASSERT_EQ(skel->arena->load_acquire64_result, 0x1234567890abcdef,
		  "load_acquire64_result");
}

/* Run the arena store_release BPF program once and verify that each
 * access width (8/16/32/64 bits) stored the expected value into the
 * arena result slots.  Skipped when Clang lacks store-release or
 * addr_space_cast support.
 */
static void test_store_release(struct arena_atomics *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int fd, ret;

	if (skel->data->skip_lacq_srel_tests) {
		printf("%s:SKIP:Clang does not support BPF store-release or addr_space_cast\n",
		       __func__);
		test__skip();
		return;
	}

	/* Run the program directly; attaching it is unnecessary. */
	fd = bpf_program__fd(skel->progs.store_release);
	ret = bpf_prog_test_run_opts(fd, &topts);
	if (!ASSERT_OK(ret, "test_run_opts err") ||
	    !ASSERT_OK(topts.retval, "test_run_opts retval"))
		return;

	ASSERT_EQ(skel->arena->store_release8_result, 0x12, "store_release8_result");
	ASSERT_EQ(skel->arena->store_release16_result, 0x1234, "store_release16_result");
	ASSERT_EQ(skel->arena->store_release32_result, 0x12345678, "store_release32_result");
	ASSERT_EQ(skel->arena->store_release64_result, 0x1234567890abcdef,
		  "store_release64_result");
}

void test_arena_atomics(void)
{
struct arena_atomics *skel;
Expand All @@ -171,7 +225,7 @@ void test_arena_atomics(void)
if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open"))
return;

if (skel->data->skip_tests) {
if (skel->data->skip_all_tests) {
printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang",
__func__);
test__skip();
Expand Down Expand Up @@ -199,6 +253,11 @@ void test_arena_atomics(void)
if (test__start_subtest("uaf"))
test_uaf(skel);

if (test__start_subtest("load_acquire"))
test_load_acquire(skel);
if (test__start_subtest("store_release"))
test_store_release(skel);

cleanup:
arena_atomics__destroy(skel);
}
57 changes: 56 additions & 1 deletion tools/testing/selftests/bpf/prog_tests/atomics.c
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,56 @@ static void test_xchg(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
}

/* Run the load_acquire lskel program once and check that each access
 * width (8/16/32/64 bits) read back the expected seed value.  Skipped
 * when Clang cannot emit BPF load-acquire instructions.
 */
static void test_load_acquire(struct atomics_lskel *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int fd, ret;

	if (skel->data->skip_lacq_srel_tests) {
		printf("%s:SKIP:Clang does not support BPF load-acquire\n", __func__);
		test__skip();
		return;
	}

	/* Run the program directly; attaching it is unnecessary. */
	fd = skel->progs.load_acquire.prog_fd;
	ret = bpf_prog_test_run_opts(fd, &topts);
	if (!ASSERT_OK(ret, "test_run_opts err") ||
	    !ASSERT_OK(topts.retval, "test_run_opts retval"))
		return;

	ASSERT_EQ(skel->bss->load_acquire8_result, 0x12, "load_acquire8_result");
	ASSERT_EQ(skel->bss->load_acquire16_result, 0x1234, "load_acquire16_result");
	ASSERT_EQ(skel->bss->load_acquire32_result, 0x12345678, "load_acquire32_result");
	ASSERT_EQ(skel->bss->load_acquire64_result, 0x1234567890abcdef, "load_acquire64_result");
}

/* Run the store_release lskel program once and check that each access
 * width (8/16/32/64 bits) wrote the expected value into .bss.  Skipped
 * when Clang cannot emit BPF store-release instructions.
 */
static void test_store_release(struct atomics_lskel *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int fd, ret;

	if (skel->data->skip_lacq_srel_tests) {
		printf("%s:SKIP:Clang does not support BPF store-release\n", __func__);
		test__skip();
		return;
	}

	/* Run the program directly; attaching it is unnecessary. */
	fd = skel->progs.store_release.prog_fd;
	ret = bpf_prog_test_run_opts(fd, &topts);
	if (!ASSERT_OK(ret, "test_run_opts err") ||
	    !ASSERT_OK(topts.retval, "test_run_opts retval"))
		return;

	ASSERT_EQ(skel->bss->store_release8_result, 0x12, "store_release8_result");
	ASSERT_EQ(skel->bss->store_release16_result, 0x1234, "store_release16_result");
	ASSERT_EQ(skel->bss->store_release32_result, 0x12345678, "store_release32_result");
	ASSERT_EQ(skel->bss->store_release64_result, 0x1234567890abcdef, "store_release64_result");
}

void test_atomics(void)
{
struct atomics_lskel *skel;
Expand All @@ -170,7 +220,7 @@ void test_atomics(void)
if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
return;

if (skel->data->skip_tests) {
if (skel->data->skip_all_tests) {
printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
__func__);
test__skip();
Expand All @@ -193,6 +243,11 @@ void test_atomics(void)
if (test__start_subtest("xchg"))
test_xchg(skel);

if (test__start_subtest("load_acquire"))
test_load_acquire(skel);
if (test__start_subtest("store_release"))
test_store_release(skel);

cleanup:
atomics_lskel__destroy(skel);
}
62 changes: 60 additions & 2 deletions tools/testing/selftests/bpf/progs/arena_atomics.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,15 @@ struct {
} arena SEC(".maps");

/* Skip flags read by the userspace harness (prog_tests/arena_atomics.c)
 * through the skeleton.  The scraped diff carried both the pre-patch
 * "skip_tests" and post-patch "skip_all_tests" definitions; only the
 * post-patch names are kept here to avoid duplicate/conflicting globals.
 */
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_all_tests __attribute((__section__(".data"))) = false;
#else
/* Compiler lacks atomics support or addr_space_cast: skip every subtest. */
bool skip_all_tests = true;
#endif

/* Load-acquire/store-release subtests additionally require Clang support
 * for the new instructions (__BPF_FEATURE_LOAD_ACQ_STORE_REL).
 */
#if defined(__BPF_FEATURE_LOAD_ACQ_STORE_REL) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
#else
bool skip_lacq_srel_tests = true;
#endif

__u32 pid = 0;
Expand Down Expand Up @@ -274,4 +280,56 @@ int uaf(const void *ctx)
return 0;
}

/* Seed values the load_acquire program reads from arena memory, one per
 * access width (8/16/32/64 bits).
 */
__u8 __arena_global load_acquire8_value = 0x12;
__u16 __arena_global load_acquire16_value = 0x1234;
__u32 __arena_global load_acquire32_value = 0x12345678;
__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;

/* Results checked by prog_tests/arena_atomics.c after the test run. */
__u8 __arena_global load_acquire8_result = 0;
__u16 __arena_global load_acquire16_result = 0;
__u32 __arena_global load_acquire32_result = 0;
__u64 __arena_global load_acquire64_result = 0;

SEC("raw_tp/sys_enter")
int load_acquire(const void *ctx)
{
	/* Only react to syscalls made by the test process itself. */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
	/* With __BPF_FEATURE_LOAD_ACQ_STORE_REL defined, Clang lowers these
	 * __ATOMIC_ACQUIRE loads to BPF load-acquire instructions, one per
	 * access width.
	 */
	load_acquire8_result = __atomic_load_n(&load_acquire8_value, __ATOMIC_ACQUIRE);
	load_acquire16_result = __atomic_load_n(&load_acquire16_value, __ATOMIC_ACQUIRE);
	load_acquire32_result = __atomic_load_n(&load_acquire32_value, __ATOMIC_ACQUIRE);
	load_acquire64_result = __atomic_load_n(&load_acquire64_value, __ATOMIC_ACQUIRE);
#endif

	return 0;
}

/* Arena slots the store_release program writes into, checked by
 * prog_tests/arena_atomics.c after the test run.
 */
__u8 __arena_global store_release8_result = 0;
__u16 __arena_global store_release16_result = 0;
__u32 __arena_global store_release32_result = 0;
__u64 __arena_global store_release64_result = 0;

SEC("raw_tp/sys_enter")
int store_release(const void *ctx)
{
	/* Only react to syscalls made by the test process itself. */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
	__u8 val8 = 0x12;
	__u16 val16 = 0x1234;
	__u32 val32 = 0x12345678;
	__u64 val64 = 0x1234567890abcdef;

	/* With __BPF_FEATURE_LOAD_ACQ_STORE_REL defined, Clang lowers these
	 * __ATOMIC_RELEASE stores to BPF store-release instructions, one per
	 * access width.
	 */
	__atomic_store_n(&store_release8_result, val8, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release16_result, val16, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release32_result, val32, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release64_result, val64, __ATOMIC_RELEASE);
#endif

	return 0;
}

char _license[] SEC("license") = "GPL";
62 changes: 60 additions & 2 deletions tools/testing/selftests/bpf/progs/atomics.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,15 @@
#include <stdbool.h>

/* Skip flags read by the userspace harness (prog_tests/atomics.c)
 * through the lskel.  The scraped diff carried both the pre-patch
 * "skip_tests" and post-patch "skip_all_tests" definitions; only the
 * post-patch names are kept here to avoid duplicate/conflicting globals.
 */
#ifdef ENABLE_ATOMICS_TESTS
bool skip_all_tests __attribute((__section__(".data"))) = false;
#else
/* Compiler lacks BPF atomics support: skip every subtest. */
bool skip_all_tests = true;
#endif

/* Load-acquire/store-release subtests additionally require Clang support
 * for the new instructions (__BPF_FEATURE_LOAD_ACQ_STORE_REL).
 */
#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
#else
bool skip_lacq_srel_tests = true;
#endif

__u32 pid = 0;
Expand Down Expand Up @@ -168,3 +174,55 @@ int xchg(const void *ctx)

return 0;
}

/* Seed values the load_acquire program reads, one per access width
 * (8/16/32/64 bits).
 */
__u8 load_acquire8_value = 0x12;
__u16 load_acquire16_value = 0x1234;
__u32 load_acquire32_value = 0x12345678;
__u64 load_acquire64_value = 0x1234567890abcdef;

/* Results checked by prog_tests/atomics.c via skel->bss after the run. */
__u8 load_acquire8_result = 0;
__u16 load_acquire16_result = 0;
__u32 load_acquire32_result = 0;
__u64 load_acquire64_result = 0;

SEC("raw_tp/sys_enter")
int load_acquire(const void *ctx)
{
	/* Only react to syscalls made by the test process itself. */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
	/* With __BPF_FEATURE_LOAD_ACQ_STORE_REL defined, Clang lowers these
	 * __ATOMIC_ACQUIRE loads to BPF load-acquire instructions, one per
	 * access width.
	 */
	load_acquire8_result = __atomic_load_n(&load_acquire8_value, __ATOMIC_ACQUIRE);
	load_acquire16_result = __atomic_load_n(&load_acquire16_value, __ATOMIC_ACQUIRE);
	load_acquire32_result = __atomic_load_n(&load_acquire32_value, __ATOMIC_ACQUIRE);
	load_acquire64_result = __atomic_load_n(&load_acquire64_value, __ATOMIC_ACQUIRE);
#endif

	return 0;
}

/* Slots the store_release program writes into, checked by
 * prog_tests/atomics.c via skel->bss after the run.
 */
__u8 store_release8_result = 0;
__u16 store_release16_result = 0;
__u32 store_release32_result = 0;
__u64 store_release64_result = 0;

SEC("raw_tp/sys_enter")
int store_release(const void *ctx)
{
	/* Only react to syscalls made by the test process itself. */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
	__u8 val8 = 0x12;
	__u16 val16 = 0x1234;
	__u32 val32 = 0x12345678;
	__u64 val64 = 0x1234567890abcdef;

	/* With __BPF_FEATURE_LOAD_ACQ_STORE_REL defined, Clang lowers these
	 * __ATOMIC_RELEASE stores to BPF store-release instructions, one per
	 * access width.
	 */
	__atomic_store_n(&store_release8_result, val8, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release16_result, val16, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release32_result, val32, __ATOMIC_RELEASE);
	__atomic_store_n(&store_release64_result, val64, __ATOMIC_RELEASE);
#endif

	return 0;
}
26 changes: 14 additions & 12 deletions tools/testing/selftests/bpf/verifier/atomic_invalid.c
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#define __INVALID_ATOMIC_ACCESS_TEST(op) \
#define __INVALID_ATOMIC_ACCESS_TEST(op, reg) \
{ \
"atomic " #op " access through non-pointer ", \
.insns = { \
Expand All @@ -9,15 +9,17 @@
BPF_EXIT_INSN(), \
}, \
.result = REJECT, \
.errstr = "R1 invalid mem access 'scalar'" \
.errstr = #reg " invalid mem access 'scalar'" \
}
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG),
__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG, R1),
__INVALID_ATOMIC_ACCESS_TEST(BPF_LOAD_ACQ, R0),
__INVALID_ATOMIC_ACCESS_TEST(BPF_STORE_REL, R1),
Loading

0 comments on commit ab3e6a2

Please sign in to comment.