bpf: Check return from set_memory_rox()

arch_protect_bpf_trampoline() and alloc_new_pack() call
set_memory_rox() which can fail, leading to unprotected memory.

Take the return value of set_memory_rox() into account and add the
__must_check attribute to arch_protect_bpf_trampoline().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/fe1c163c83767fde5cab31d209a4a6be3ddb3a73.1710574353.git.christophe.leroy@csgroup.eu
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
This commit is contained in:
Christophe Leroy 2024-03-16 08:35:41 +01:00 committed by Martin KaFai Lau
parent e3362acd79
commit c733239f8f
7 changed files with 40 additions and 16 deletions

View File

@ -2176,8 +2176,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
void arch_protect_bpf_trampoline(void *image, unsigned int size)
int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,

View File

@ -3004,8 +3004,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
void arch_protect_bpf_trampoline(void *image, unsigned int size)
int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,

View File

@ -1116,7 +1116,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr);

View File

@ -740,8 +740,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (err)
goto reset_unlock;
}
for (i = 0; i < st_map->image_pages_cnt; i++)
arch_protect_bpf_trampoline(st_map->image_pages[i], PAGE_SIZE);
for (i = 0; i < st_map->image_pages_cnt; i++) {
err = arch_protect_bpf_trampoline(st_map->image_pages[i],
PAGE_SIZE);
if (err)
goto reset_unlock;
}
if (st_map->map.map_flags & BPF_F_LINK) {
err = 0;

View File

@ -908,23 +908,30 @@ static LIST_HEAD(pack_list);
static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_prog_pack *pack;
int err;
pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
GFP_KERNEL);
if (!pack)
return NULL;
pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
if (!pack->ptr) {
kfree(pack);
return NULL;
}
if (!pack->ptr)
goto out;
bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
list_add_tail(&pack->list, &pack_list);
set_vm_flush_reset_perms(pack->ptr);
set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
err = set_memory_rox((unsigned long)pack->ptr,
BPF_PROG_PACK_SIZE / PAGE_SIZE);
if (err)
goto out;
list_add_tail(&pack->list, &pack_list);
return pack;
out:
bpf_jit_free_exec(pack->ptr);
kfree(pack);
return NULL;
}
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
@ -939,9 +946,16 @@ void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
size = round_up(size, PAGE_SIZE);
ptr = bpf_jit_alloc_exec(size);
if (ptr) {
int err;
bpf_fill_ill_insns(ptr, size);
set_vm_flush_reset_perms(ptr);
set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
err = set_memory_rox((unsigned long)ptr,
size / PAGE_SIZE);
if (err) {
bpf_jit_free_exec(ptr);
ptr = NULL;
}
}
goto out;
}

View File

@ -456,7 +456,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
if (err < 0)
goto out_free;
arch_protect_bpf_trampoline(im->image, im->size);
err = arch_protect_bpf_trampoline(im->image, im->size);
if (err)
goto out_free;
WARN_ON(tr->cur_image && total == 0);
if (tr->cur_image)
@ -1072,10 +1074,10 @@ void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_jit_free_exec(image);
}
void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
{
WARN_ON_ONCE(size > PAGE_SIZE);
set_memory_rox((long)image, 1);
return set_memory_rox((long)image, 1);
}
int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,

View File

@ -133,7 +133,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
if (err < 0)
goto out;
arch_protect_bpf_trampoline(image, PAGE_SIZE);
err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
if (err)
goto out;
prog_ret = dummy_ops_call_op(image, args);
err = dummy_ops_copy_args(args);