powerpc64/bpf: Add jit support for load_acquire and store_release #9317

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 1 commit into base: bpf-next_base
arch/powerpc/include/asm/ppc-opcode.h (1 addition & 0 deletions)
@@ -425,6 +425,7 @@
#define PPC_RAW_SC() (0x44000002)
#define PPC_RAW_SYNC() (0x7c0004ac)
#define PPC_RAW_ISYNC() (0x4c00012c)
#define PPC_RAW_LWSYNC() (0x7c2004ac)

/*
* Define what the VSX XX1 form instructions will look like, then add
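For reference, lwsync is the sync instruction with its L field set to 1, so the new opcode value follows directly from the existing PPC_RAW_SYNC() encoding. A quick sanity-check sketch (the SYNC_BASE/SYNC_L names below are made up for illustration and are not part of ppc-opcode.h):

/* Hypothetical derivation, not part of the patch: lwsync is sync with L=1.
 * The set bit of L=1 lands at bit 21 of the 32-bit instruction word, so
 * 0x7c0004ac | (1 << 21) == 0x7c2004ac.
 */
#include <stdio.h>

#define SYNC_BASE	0x7c0004acu		/* same value as PPC_RAW_SYNC() */
#define SYNC_L(l)	((unsigned int)(l) << 21)

int main(void)
{
	printf("lwsync = 0x%08x\n", SYNC_BASE | SYNC_L(1));	/* prints 0x7c2004ac */
	return 0;
}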
arch/powerpc/net/bpf_jit_comp64.c (82 additions & 0 deletions)
@@ -409,6 +409,71 @@ asm (
" blr ;"
);

static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
{
	u32 code = insn.code;
	u32 dst_reg = bpf_to_ppc(insn.dst_reg);
	u32 src_reg = bpf_to_ppc(insn.src_reg);
	u32 size = BPF_SIZE(code);
	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
	s16 off = insn.off;
	s32 imm = insn.imm;

	switch (imm) {
	case BPF_LOAD_ACQ:
		switch (size) {
		case BPF_B:
			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
			break;
		case BPF_H:
			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
			break;
		case BPF_W:
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
			break;
		case BPF_DW:
			/* ld is DS-form: the displacement must be a multiple
			 * of 4, otherwise use the indexed form via a temporary.
			 */
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp1_reg, off));
				EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
			} else {
				EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
			}
			break;
		}
		/* Acquire: order the load above before later memory accesses. */
		EMIT(PPC_RAW_LWSYNC());
		break;
	case BPF_STORE_REL:
		/* Release: order earlier memory accesses before the store below. */
		EMIT(PPC_RAW_LWSYNC());
		switch (size) {
		case BPF_B:
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_H:
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_W:
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_DW:
			/* std is DS-form as well: same multiple-of-4 constraint. */
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;
		}
		break;
	default:
		pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
				   imm);
		return -EINVAL;
	}

	return 0;
}
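
For a sense of what reaches emit_atomic_ld_st(), these instructions typically come from acquire/release atomics in BPF C programs. The sketch below is illustrative only: the helper names are made up, and whether the builtins lower to BPF_LOAD_ACQ/BPF_STORE_REL depends on the Clang version and cpu level used to build the program.

/* Illustrative only, not part of the patch.  When the builtins lower to
 * BPF_LOAD_ACQ/BPF_STORE_REL, the helper above JITs them on powerpc64 as:
 *   load-acquire  -> lbz/lhz/lwz/ld followed by lwsync
 *   store-release -> lwsync followed by stb/sth/stw/std
 */
static inline void publish(int *data, int *flag, int val)
{
	*data = val;
	/* Release: the *data write is visible before the flag reads as set. */
	__atomic_store_n(flag, 1, __ATOMIC_RELEASE);
}

static inline int consume(int *data, int *flag)
{
	/* Acquire: if the flag reads 1, the matching *data write is visible. */
	if (__atomic_load_n(flag, __ATOMIC_ACQUIRE))
		return *data;
	return -1;
}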

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
@@ -898,8 +963,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_B:
		case BPF_STX | BPF_ATOMIC | BPF_H:
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (bpf_atomic_is_load_store(&insn[i])) {
				ret = emit_atomic_ld_st(insn[i], ctx, image);
				if (ret)
					return ret;

				if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			} else if (size == BPF_B || size == BPF_H) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			save_reg = tmp2_reg;
			ret_reg = src_reg;

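Two details of the dispatch above are worth spelling out. First, the byte/half/word loads emitted by emit_atomic_ld_st() already zero-extend into the 64-bit GPR, so a verifier-inserted zero-extension instruction that immediately follows carries no extra work and is skipped by recording its address and advancing i. Second, bpf_atomic_is_load_store() separates the new load/store instructions from the read-modify-write atomics by their imm field; a rough sketch of that predicate, as an approximation of the generic BPF helper and not part of this diff:

/* Approximation of the generic helper used above; illustrative only,
 * written as it would appear in kernel context.
 */
static inline bool bpf_atomic_is_load_store(const struct bpf_insn *insn)
{
	const s32 imm = insn->imm;

	return imm == BPF_LOAD_ACQ || imm == BPF_STORE_REL;
}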
tools/testing/selftests/bpf/progs/bpf_misc.h (2 additions & 1 deletion)
@@ -229,7 +229,8 @@

#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_powerpc))
#define CAN_USE_LOAD_ACQ_STORE_REL
#endif
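
With the guard extended, selftest programs gated on CAN_USE_LOAD_ACQ_STORE_REL also build for the powerpc target and can exercise the new JIT path. A hypothetical guarded program is sketched below; the actual selftests encode the instructions directly via inline asm rather than relying on the compiler to lower the builtins.

/* Hypothetical example, not one of the actual selftests. */
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

int flag;

SEC("socket")
int acq_rel_smoke(void *ctx)
{
#ifdef CAN_USE_LOAD_ACQ_STORE_REL
	/* Store-release then load-acquire on a global; on powerpc64 these
	 * are JITed as lwsync+stw and lwz+lwsync respectively.
	 */
	__atomic_store_n(&flag, 1, __ATOMIC_RELEASE);
	return __atomic_load_n(&flag, __ATOMIC_ACQUIRE);
#else
	return 0;
#endif
}

char _license[] SEC("license") = "GPL";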
