 	return IS_ENABLED(CONFIG_PPC64);
 }
 
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+	if (!in_arena)
+		return true;
+	switch (insn->code) {
+	case BPF_STX | BPF_ATOMIC | BPF_H:
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
+		/* Arena load-acquire/store-release is not supported yet. */
+		if (bpf_atomic_is_load_store(insn))
+			return false;
+		/* Arena RMW atomics are implemented for the 64-bit JIT only. */
+		return IS_ENABLED(CONFIG_PPC64);
+	}
+	return true;
+}
+
 void *arch_alloc_bpf_trampoline(unsigned int size)
 {
 	return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns);
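For context between the two hunks: a minimal sketch, not part of the patch, of what the new bpf_jit_supports_insn() hook decides for arena atomics. It uses the kernel's BPF_ATOMIC_OP() insn builder and bpf_atomic_is_load_store() from include/linux/filter.h; the example function and its WARN_ON() checks are purely illustrative.

#include <linux/filter.h>

/* Illustrative only: how the new hook gates arena atomics. */
static void arena_atomic_gate_example(void)
{
	/* Plain RMW atomic (BPF_ADD): allowed in the arena when CONFIG_PPC64=y. */
	struct bpf_insn add = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0);
	/* Load-acquire (imm = BPF_LOAD_ACQ): bpf_atomic_is_load_store() is true, so rejected. */
	struct bpf_insn ldacq = BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_1, BPF_REG_2, 0);

	WARN_ON(!bpf_jit_supports_insn(&add, true));	/* true with CONFIG_PPC64=y */
	WARN_ON(bpf_jit_supports_insn(&ldacq, true));	/* always false */
}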
 			break;
+		/*
+		 * BPF_STX PROBE_ATOMIC (arena atomic ops)
+		 */
+		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+			/* Translate the arena offset in dst_reg to a kernel address. */
+			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+			ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
+						      &jmp_off, &tmp_idx, &addrs[i + 1]);
+			if (ret) {
+				if (ret == -EOPNOTSUPP) {
+					pr_err_ratelimited(
+						"eBPF filter atomic op code %02x (@%d) unsupported\n",
+						code, i);
+				}
+				return ret;
+			}
+			/* Register an extable entry: a faulting LDARX/LWARX resumes at jmp_off. */
+			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+						    tmp_idx, jmp_off, dst_reg, code);
+			if (ret)
+				return ret;
+
+			/* Restore dst_reg to the original arena offset. */
+			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+			break;
+
 		/*
 		 * BPF_STX ATOMIC (atomic ops)
 		 */