mirror of
https://github.com/torvalds/linux
synced 2024-07-25 04:30:21 +00:00
powerpc/bpf/32: Add instructions for atomic_[cmp]xchg
This adds two atomic opcodes BPF_XCHG and BPF_CMPXCHG on ppc32, both of which include the BPF_FETCH flag. The kernel's atomic_cmpxchg operation fundamentally has 3 operands, but we only have two register fields. Therefore the operand we compare against (the kernel's API calls it 'old') is hard-coded to be BPF_REG_R0. Also, the kernel's atomic_cmpxchg returns the previous value at dst_reg + off. JIT the same behavior for BPF too, with the return value placed in BPF_REG_0. BPF_REG_R0 = atomic_cmpxchg(dst_reg + off, BPF_REG_R0, src_reg); Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le) Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20220610155552.25892-6-hbathini@linux.ibm.com
This commit is contained in:
parent
aea7ef8a82
commit
2d9206b227
|
@ -297,6 +297,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
|
||||||
u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
|
u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
|
||||||
u32 tmp_reg = bpf_to_ppc(TMP_REG);
|
u32 tmp_reg = bpf_to_ppc(TMP_REG);
|
||||||
u32 size = BPF_SIZE(code);
|
u32 size = BPF_SIZE(code);
|
||||||
|
u32 save_reg, ret_reg;
|
||||||
s16 off = insn[i].off;
|
s16 off = insn[i].off;
|
||||||
s32 imm = insn[i].imm;
|
s32 imm = insn[i].imm;
|
||||||
bool func_addr_fixed;
|
bool func_addr_fixed;
|
||||||
|
@ -799,6 +800,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
|
||||||
* BPF_STX ATOMIC (atomic ops)
|
* BPF_STX ATOMIC (atomic ops)
|
||||||
*/
|
*/
|
||||||
case BPF_STX | BPF_ATOMIC | BPF_W:
|
case BPF_STX | BPF_ATOMIC | BPF_W:
|
||||||
|
save_reg = _R0;
|
||||||
|
ret_reg = src_reg;
|
||||||
|
|
||||||
bpf_set_seen_register(ctx, tmp_reg);
|
bpf_set_seen_register(ctx, tmp_reg);
|
||||||
bpf_set_seen_register(ctx, ax_reg);
|
bpf_set_seen_register(ctx, ax_reg);
|
||||||
|
|
||||||
|
@ -829,6 +833,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
|
||||||
case BPF_XOR | BPF_FETCH:
|
case BPF_XOR | BPF_FETCH:
|
||||||
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
|
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
|
||||||
break;
|
break;
|
||||||
|
case BPF_CMPXCHG:
|
||||||
|
/*
|
||||||
|
* Return old value in BPF_REG_0 for BPF_CMPXCHG &
|
||||||
|
* in src_reg for other cases.
|
||||||
|
*/
|
||||||
|
ret_reg = bpf_to_ppc(BPF_REG_0);
|
||||||
|
|
||||||
|
/* Compare with old value in BPF_REG_0 */
|
||||||
|
EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
|
||||||
|
/* Don't set if different from old value */
|
||||||
|
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
|
||||||
|
fallthrough;
|
||||||
|
case BPF_XCHG:
|
||||||
|
save_reg = src_reg;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
|
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
|
||||||
code, i);
|
code, i);
|
||||||
|
@ -836,15 +855,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
|
||||||
}
|
}
|
||||||
|
|
||||||
/* store new value */
|
/* store new value */
|
||||||
EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
|
EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
|
||||||
/* we're done if this succeeded */
|
/* we're done if this succeeded */
|
||||||
PPC_BCC_SHORT(COND_NE, tmp_idx);
|
PPC_BCC_SHORT(COND_NE, tmp_idx);
|
||||||
|
|
||||||
/* For the BPF_FETCH variant, get old data into src_reg */
|
/* For the BPF_FETCH variant, get old data into src_reg */
|
||||||
if (imm & BPF_FETCH) {
|
if (imm & BPF_FETCH) {
|
||||||
EMIT(PPC_RAW_MR(src_reg, ax_reg));
|
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
|
||||||
if (!fp->aux->verifier_zext)
|
if (!fp->aux->verifier_zext)
|
||||||
EMIT(PPC_RAW_LI(src_reg_h, 0));
|
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue