bpf: Use bytes instead of pages for bpf_jit_[charge|uncharge]_modmem

This enables sub-page memory charge and allocation.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220204185742.271030-3-song@kernel.org
This commit is contained in:
Song Liu 2022-02-04 10:57:35 -08:00 committed by Alexei Starovoitov
parent fac54e2bfb
commit 3486bedd99
3 changed files with 13 additions and 14 deletions

View file

@@ -846,8 +846,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
 void bpf_image_ksym_del(struct bpf_ksym *ksym);
 void bpf_ksym_add(struct bpf_ksym *ksym);
 void bpf_ksym_del(struct bpf_ksym *ksym);
-int bpf_jit_charge_modmem(u32 pages);
-void bpf_jit_uncharge_modmem(u32 pages);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
 bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
 #else
 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,

View file

@@ -833,12 +833,11 @@ static int __init bpf_jit_charge_init(void)
 }
 pure_initcall(bpf_jit_charge_init);
 
-int bpf_jit_charge_modmem(u32 pages)
+int bpf_jit_charge_modmem(u32 size)
 {
-	if (atomic_long_add_return(pages, &bpf_jit_current) >
-	    (bpf_jit_limit >> PAGE_SHIFT)) {
+	if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
 		if (!bpf_capable()) {
-			atomic_long_sub(pages, &bpf_jit_current);
+			atomic_long_sub(size, &bpf_jit_current);
 			return -EPERM;
 		}
 	}
@@ -846,9 +845,9 @@ int bpf_jit_charge_modmem(u32 pages)
 	return 0;
 }
 
-void bpf_jit_uncharge_modmem(u32 pages)
+void bpf_jit_uncharge_modmem(u32 size)
 {
-	atomic_long_sub(pages, &bpf_jit_current);
+	atomic_long_sub(size, &bpf_jit_current);
 }
 
 void *__weak bpf_jit_alloc_exec(unsigned long size)
@@ -879,11 +878,11 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 	pages = size / PAGE_SIZE;
 
-	if (bpf_jit_charge_modmem(pages))
+	if (bpf_jit_charge_modmem(size))
 		return NULL;
 	hdr = bpf_jit_alloc_exec(size);
 	if (!hdr) {
-		bpf_jit_uncharge_modmem(pages);
+		bpf_jit_uncharge_modmem(size);
 		return NULL;
 	}
 
@@ -906,7 +905,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 	u32 pages = hdr->pages;
 
 	bpf_jit_free_exec(hdr);
-	bpf_jit_uncharge_modmem(pages);
+	bpf_jit_uncharge_modmem(pages << PAGE_SHIFT);
 }
 
 /* This symbol is only overridden by archs that have different

View file

@@ -213,7 +213,7 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work)
 	im = container_of(work, struct bpf_tramp_image, work);
 	bpf_image_ksym_del(&im->ksym);
 	bpf_jit_free_exec(im->image);
-	bpf_jit_uncharge_modmem(1);
+	bpf_jit_uncharge_modmem(PAGE_SIZE);
 	percpu_ref_exit(&im->pcref);
 	kfree_rcu(im, rcu);
 }
@@ -310,7 +310,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
 	if (!im)
 		goto out;
 
-	err = bpf_jit_charge_modmem(1);
+	err = bpf_jit_charge_modmem(PAGE_SIZE);
 	if (err)
 		goto out_free_im;
 
@@ -332,7 +332,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
 out_free_image:
 	bpf_jit_free_exec(im->image);
 out_uncharge:
-	bpf_jit_uncharge_modmem(1);
+	bpf_jit_uncharge_modmem(PAGE_SIZE);
 out_free_im:
 	kfree(im);
 out: