bpf: generalise tail call map compatibility check
The check for tail call map compatibility ensures that tail calls only
happen between programs of the same type that also agree on the JITed
flag. To ensure backwards compatibility for XDP frags we need a similar
check for the programs stored in cpumap and devmap entries, so move the
owner state from bpf_array_aux into bpf_map, add xdp_has_frags to the
check, and apply the same check to cpumap and devmap.

Acked-by: John Fastabend <john.fastabend@gmail.com>
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/f19fd97c0328a39927f3ad03e1ca6b43fd53cdfd.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tohojo authored and Alexei Starovoitov committed Jan 21, 2022
1 parent 082c4bf commit f45d5b6
Showing 6 changed files with 48 additions and 40 deletions.
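
For illustration, here is a minimal, hypothetical BPF-side sketch (not part of this commit) of the kind of program pair the compatibility check governs: two XDP programs sharing a BPF_MAP_TYPE_PROG_ARRAY via bpf_tail_call(). The map and program names are made up and the standard libbpf helper macros are assumed; the point is that every program using the map must agree on prog type, JITed flag and, after this change, the xdp_has_frags flag, and the same rule now also covers programs stored in cpumap and devmap entries.

/* Hypothetical sketch, not from the patch: two XDP programs sharing a
 * prog array for tail calls. bpf_prog_map_compatible() requires every
 * program using the map to have the same prog type, JITed flag and
 * (after this patch) xdp_has_frags flag.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} jmp_table SEC(".maps");

SEC("xdp")
int xdp_callee(struct xdp_md *ctx)
{
        return XDP_PASS;
}

SEC("xdp")
int xdp_caller(struct xdp_md *ctx)
{
        /* Jumps to whatever program userspace stored at index 0. */
        bpf_tail_call(ctx, &jmp_table, 0);
        return XDP_DROP;
}

char _license[] SEC("license") = "GPL";
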
30 changes: 19 additions & 11 deletions include/linux/bpf.h
@@ -194,6 +194,17 @@ struct bpf_map {
         struct work_struct work;
         struct mutex freeze_mutex;
         atomic64_t writecnt;
+        /* 'Ownership' of program-containing map is claimed by the first program
+         * that is going to use this map or by the first program which FD is
+         * stored in the map to make sure that all callers and callees have the
+         * same prog type, JITed flag and xdp_has_frags flag.
+         */
+        struct {
+                spinlock_t lock;
+                enum bpf_prog_type type;
+                bool jited;
+                bool xdp_has_frags;
+        } owner;
 };
 
 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -994,16 +1005,6 @@ struct bpf_prog_aux {
 };
 
 struct bpf_array_aux {
-        /* 'Ownership' of prog array is claimed by the first program that
-         * is going to use this map or by the first program which FD is
-         * stored in the map to make sure that all callers and callees have
-         * the same prog type and JITed flag.
-         */
-        struct {
-                spinlock_t lock;
-                enum bpf_prog_type type;
-                bool jited;
-        } owner;
         /* Programs with direct jumps into programs part of this array. */
         struct list_head poke_progs;
         struct bpf_map *map;
@@ -1178,7 +1179,14 @@ struct bpf_event_entry {
         struct rcu_head rcu;
 };
 
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+        return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+               map->map_type == BPF_MAP_TYPE_DEVMAP ||
+               map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
+
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
 int bpf_prog_calc_tag(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

4 changes: 1 addition & 3 deletions kernel/bpf/arraymap.c
@@ -837,13 +837,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                    struct file *map_file, int fd)
 {
-        struct bpf_array *array = container_of(map, struct bpf_array, map);
         struct bpf_prog *prog = bpf_prog_get(fd);
 
         if (IS_ERR(prog))
                 return prog;
 
-        if (!bpf_prog_array_compatible(array, prog)) {
+        if (!bpf_prog_map_compatible(map, prog)) {
                 bpf_prog_put(prog);
                 return ERR_PTR(-EINVAL);
         }
@@ -1071,7 +1070,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
         INIT_WORK(&aux->work, prog_array_map_clear_deferred);
         INIT_LIST_HEAD(&aux->poke_progs);
         mutex_init(&aux->poke_mutex);
-        spin_lock_init(&aux->owner.lock);
 
         map = array_map_alloc(attr);
         if (IS_ERR(map)) {

28 changes: 14 additions & 14 deletions kernel/bpf/core.c
@@ -1829,28 +1829,30 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 }
 #endif
 
-bool bpf_prog_array_compatible(struct bpf_array *array,
-                               const struct bpf_prog *fp)
+bool bpf_prog_map_compatible(struct bpf_map *map,
+                             const struct bpf_prog *fp)
 {
         bool ret;
 
         if (fp->kprobe_override)
                 return false;
 
-        spin_lock(&array->aux->owner.lock);
-
-        if (!array->aux->owner.type) {
+        spin_lock(&map->owner.lock);
+        if (!map->owner.type) {
                 /* There's no owner yet where we could check for
                  * compatibility.
                  */
-                array->aux->owner.type = fp->type;
-                array->aux->owner.jited = fp->jited;
+                map->owner.type = fp->type;
+                map->owner.jited = fp->jited;
+                map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
                 ret = true;
         } else {
-                ret = array->aux->owner.type == fp->type &&
-                      array->aux->owner.jited == fp->jited;
+                ret = map->owner.type == fp->type &&
+                      map->owner.jited == fp->jited &&
+                      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
         }
-        spin_unlock(&array->aux->owner.lock);
+        spin_unlock(&map->owner.lock);
+
         return ret;
 }
 
@@ -1862,13 +1864,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
         mutex_lock(&aux->used_maps_mutex);
         for (i = 0; i < aux->used_map_cnt; i++) {
                 struct bpf_map *map = aux->used_maps[i];
-                struct bpf_array *array;
 
-                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                if (!map_type_contains_progs(map))
                         continue;
 
-                array = container_of(map, struct bpf_array, map);
-                if (!bpf_prog_array_compatible(array, fp)) {
+                if (!bpf_prog_map_compatible(map, fp)) {
                         ret = -EINVAL;
                         goto out;
                 }

8 changes: 5 additions & 3 deletions kernel/bpf/cpumap.c
@@ -397,15 +397,17 @@ static int cpu_map_kthread_run(void *data)
         return 0;
 }
 
-static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
+static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
+                                      struct bpf_map *map, int fd)
 {
         struct bpf_prog *prog;
 
         prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
         if (IS_ERR(prog))
                 return PTR_ERR(prog);
 
-        if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
+        if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
+            !bpf_prog_map_compatible(map, prog)) {
                 bpf_prog_put(prog);
                 return -EINVAL;
         }
@@ -457,7 +459,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
         rcpu->map_id = map->id;
         rcpu->value.qsize = value->qsize;
 
-        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
+        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
                 goto free_ptr_ring;
 
         /* Setup kthread */

3 changes: 2 additions & 1 deletion kernel/bpf/devmap.c
@@ -858,7 +858,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
                                              BPF_PROG_TYPE_XDP, false);
                 if (IS_ERR(prog))
                         goto err_put_dev;
-                if (prog->expected_attach_type != BPF_XDP_DEVMAP)
+                if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
+                    !bpf_prog_map_compatible(&dtab->map, prog))
                         goto err_put_prog;
         }
 
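As a hypothetical illustration of the devmap change above (not part of the commit): once one program has claimed ownership of the map, inserting an entry whose program disagrees on prog type, JITed flag or xdp_has_frags is rejected with -EINVAL. The sketch assumes frags_prog_fd and plain_prog_fd are fds of already-loaded XDP programs with expected_attach_type BPF_XDP_DEVMAP, one built with frags support and one without, and that ifindex names a real net device.

/* Hypothetical userspace sketch using libbpf's bpf_map_create() and
 * bpf_map_update_elem(); the program fds and ifindex are assumed to exist.
 */
#include <errno.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int try_devmap_mix(int frags_prog_fd, int plain_prog_fd, int ifindex)
{
        struct bpf_devmap_val val = {};
        __u32 key = 0;
        int map_fd;

        map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "dm_frags", sizeof(__u32),
                                sizeof(struct bpf_devmap_val), 4, NULL);
        if (map_fd < 0)
                return map_fd;

        /* First insert claims map ownership (prog type, JITed, xdp_has_frags). */
        val.ifindex = ifindex;
        val.bpf_prog.fd = frags_prog_fd;
        if (bpf_map_update_elem(map_fd, &key, &val, 0))
                return -errno;

        /* Second insert disagrees on xdp_has_frags; with this patch it is
         * rejected by bpf_prog_map_compatible() and fails with -EINVAL.
         */
        key = 1;
        val.bpf_prog.fd = plain_prog_fd;
        if (bpf_map_update_elem(map_fd, &key, &val, 0))
                printf("mixed-frags entry rejected: %d\n", -errno);

        return 0;
}
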
15 changes: 7 additions & 8 deletions kernel/bpf/syscall.c
@@ -556,16 +556,14 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
 
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 {
-        const struct bpf_map *map = filp->private_data;
-        const struct bpf_array *array;
+        struct bpf_map *map = filp->private_data;
         u32 type = 0, jited = 0;
 
-        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
-                array = container_of(map, struct bpf_array, map);
-                spin_lock(&array->aux->owner.lock);
-                type = array->aux->owner.type;
-                jited = array->aux->owner.jited;
-                spin_unlock(&array->aux->owner.lock);
+        if (map_type_contains_progs(map)) {
+                spin_lock(&map->owner.lock);
+                type = map->owner.type;
+                jited = map->owner.jited;
+                spin_unlock(&map->owner.lock);
         }
 
         seq_printf(m,
@@ -874,6 +872,7 @@ static int map_create(union bpf_attr *attr)
         atomic64_set(&map->refcnt, 1);
         atomic64_set(&map->usercnt, 1);
         mutex_init(&map->freeze_mutex);
+        spin_lock_init(&map->owner.lock);
 
         map->spin_lock_off = -EINVAL;
         map->timer_off = -EINVAL;

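One user-visible consequence of the bpf_map_show_fdinfo() change above: the owner_prog_type and owner_jited lines in /proc/<pid>/fdinfo/<map fd> are now reported for cpumap and devmap as well as prog arrays, once an owner has been recorded. A hypothetical sketch of reading them (map_fd is assumed to be an open fd for such a map):

/* Hypothetical sketch: dump the owner_* lines emitted by
 * bpf_map_show_fdinfo(); with this patch they appear for devmap and
 * cpumap too, once a program has claimed ownership of the map.
 */
#include <stdio.h>
#include <string.h>

static void print_map_owner_info(int map_fd)
{
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
        f = fopen(path, "r");
        if (!f)
                return;

        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "owner_prog_type:", 16) ||
                    !strncmp(line, "owner_jited:", 12))
                        fputs(line, stdout);
        }
        fclose(f);
}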
