From 82da3040133d0936dee7c222317b3519abb78aa4 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 9 Jul 2024 11:29:44 +0200 Subject: [PATCH] 6.6-stable patches added patches: revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch --- ...o-into-account-with-bpf_prog_lock_ro.patch | 88 ++++ ...-account-with-bpf_jit_binary_lock_ro.patch | 220 ++++++++++ ...werpc64_jit_data-to-powerpc_jit_data.patch | 55 +++ ...powerpc-bpf-use-bpf_jit_binary_pack_.patch | 396 ++++++++++++++++++ queue-6.6/series | 4 + 5 files changed, 763 insertions(+) create mode 100644 queue-6.6/revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch create mode 100644 queue-6.6/revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch create mode 100644 queue-6.6/revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch create mode 100644 queue-6.6/revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch diff --git a/queue-6.6/revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch b/queue-6.6/revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch new file mode 100644 index 00000000000..0f9fe73d8da --- /dev/null +++ b/queue-6.6/revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch @@ -0,0 +1,88 @@ +From 0ae29b60ec7e2d5d92d397af9e321802f63bc10d Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Tue, 9 Jul 2024 11:14:08 +0200 +Subject: Revert "bpf: Take return from set_memory_ro() into account with bpf_prog_lock_ro()" +
+From: Greg Kroah-Hartman +
+This reverts commit fdd411af8178edc6b7bf260f8fa4fba1bedd0a6d which is +commit 7d2cc63eca0c993c99d18893214abf8f85d566d8 upstream. +
+It is part of a series that is reported to both break the arm64 builds +and instantly crash the powerpc systems at the first load of a bpf +program. So revert it for now until it can come back in a safe way.
+ +Reported-by: matoro +Reported-by: Vitaly Chikunov +Reported-by: WangYuli +Link: https://lore.kernel.org/r/5A29E00D83AB84E3+20240706031101.637601-1-wangyuli@uniontech.com +Link: https://lore.kernel.org/r/cf736c5e37489e7dc7ffd67b9de2ab47@matoro.tk +Cc: Hari Bathini +Cc: Song Liu +Cc: Michael Ellerman +Cc: Christophe Leroy +Cc: Kees Cook +Cc: Puranjay Mohan +Cc: Ilya Leoshkevich # s390x +Cc: Tiezhu Yang # LoongArch +Cc: Johan Almbladh # MIPS Part +Cc: Alexei Starovoitov +Cc: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/filter.h | 5 ++--- + kernel/bpf/core.c | 4 +--- + kernel/bpf/verifier.c | 8 ++------ + 3 files changed, 5 insertions(+), 12 deletions(-) + +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -842,15 +842,14 @@ bpf_ctx_narrow_access_offset(u32 off, u3 + + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) + +-static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp) ++static inline void bpf_prog_lock_ro(struct bpf_prog *fp) + { + #ifndef CONFIG_BPF_JIT_ALWAYS_ON + if (!fp->jited) { + set_vm_flush_reset_perms(fp); +- return set_memory_ro((unsigned long)fp, fp->pages); ++ set_memory_ro((unsigned long)fp, fp->pages); + } + #endif +- return 0; + } + + static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -2375,9 +2375,7 @@ struct bpf_prog *bpf_prog_select_runtime + } + + finalize: +- *err = bpf_prog_lock_ro(fp); +- if (*err) +- return fp; ++ bpf_prog_lock_ro(fp); + + /* The tail call compatibility check can only be done at + * this late stage as we need to determine, if we deal +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -18625,13 +18625,9 @@ static int jit_subprogs(struct bpf_verif + * bpf_prog_load will add the kallsyms for the main program. + */ + for (i = 1; i < env->subprog_cnt; i++) { +- err = bpf_prog_lock_ro(func[i]); +- if (err) +- goto out_free; +- } +- +- for (i = 1; i < env->subprog_cnt; i++) ++ bpf_prog_lock_ro(func[i]); + bpf_prog_kallsyms_add(func[i]); ++ } + + /* Last step: make now unused interpreter insns from main + * prog consistent for later dump requests, so they can diff --git a/queue-6.6/revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch b/queue-6.6/revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch new file mode 100644 index 00000000000..dd6d503b3a5 --- /dev/null +++ b/queue-6.6/revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch @@ -0,0 +1,220 @@ +From eb45299dc8475452c38f7b72ca35778a257b063d Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Tue, 9 Jul 2024 11:14:05 +0200 +Subject: Revert "bpf: Take return from set_memory_rox() into account with bpf_jit_binary_lock_ro()" +
+From: Greg Kroah-Hartman +
+This reverts commit 08f6c05feb1db21653e98ca84ea04ca032d014c7 which is +commit e60adf513275c3a38e5cb67f7fd12387e43a3ff5 upstream. +
+It is part of a series that is reported to both break the arm64 builds +and instantly crash the powerpc systems at the first load of a bpf +program. So revert it for now until it can come back in a safe way.
+ +Reported-by: matoro +Reported-by: Vitaly Chikunov +Reported-by: WangYuli +Link: https://lore.kernel.org/r/5A29E00D83AB84E3+20240706031101.637601-1-wangyuli@uniontech.com +Link: https://lore.kernel.org/r/cf736c5e37489e7dc7ffd67b9de2ab47@matoro.tk +Cc: Hari Bathini +Cc: Song Liu +Cc: Michael Ellerman +Cc: Christophe Leroy +Cc: Kees Cook +Cc: Puranjay Mohan +Cc: Ilya Leoshkevich # s390x +Cc: Tiezhu Yang # LoongArch +Cc: Johan Almbladh # MIPS Part +Cc: Alexei Starovoitov +Cc: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + arch/arm/net/bpf_jit_32.c | 25 +++++++++++++------------ + arch/loongarch/net/bpf_jit.c | 22 ++++++---------------- + arch/mips/net/bpf_jit_comp.c | 3 +-- + arch/parisc/net/bpf_jit_core.c | 8 +------- + arch/s390/net/bpf_jit_comp.c | 6 +----- + arch/sparc/net/bpf_jit_comp_64.c | 6 +----- + arch/x86/net/bpf_jit_comp32.c | 3 ++- + include/linux/filter.h | 5 ++--- + 8 files changed, 27 insertions(+), 51 deletions(-) + +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -1982,21 +1982,28 @@ struct bpf_prog *bpf_int_jit_compile(str + /* If building the body of the JITed code fails somehow, + * we fall back to the interpretation. + */ +- if (build_body(&ctx) < 0) +- goto out_free; ++ if (build_body(&ctx) < 0) { ++ image_ptr = NULL; ++ bpf_jit_binary_free(header); ++ prog = orig_prog; ++ goto out_imms; ++ } + build_epilogue(&ctx); + + /* 3.) Extra pass to validate JITed Code */ +- if (validate_code(&ctx)) +- goto out_free; ++ if (validate_code(&ctx)) { ++ image_ptr = NULL; ++ bpf_jit_binary_free(header); ++ prog = orig_prog; ++ goto out_imms; ++ } + flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); + + if (bpf_jit_enable > 1) + /* there are 2 passes here */ + bpf_jit_dump(prog->len, image_size, 2, ctx.target); + +- if (bpf_jit_binary_lock_ro(header)) +- goto out_free; ++ bpf_jit_binary_lock_ro(header); + prog->bpf_func = (void *)ctx.target; + prog->jited = 1; + prog->jited_len = image_size; +@@ -2013,11 +2020,5 @@ out: + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +- +-out_free: +- image_ptr = NULL; +- bpf_jit_binary_free(header); +- prog = orig_prog; +- goto out_imms; + } + +--- a/arch/loongarch/net/bpf_jit.c ++++ b/arch/loongarch/net/bpf_jit.c +@@ -1206,19 +1206,16 @@ skip_init_ctx: + flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx)); + + if (!prog->is_func || extra_pass) { +- int err; +- + if (extra_pass && ctx.idx != jit_data->ctx.idx) { + pr_err_once("multi-func JIT bug %d != %d\n", + ctx.idx, jit_data->ctx.idx); +- goto out_free; +- } +- err = bpf_jit_binary_lock_ro(header); +- if (err) { +- pr_err_once("bpf_jit_binary_lock_ro() returned %d\n", +- err); +- goto out_free; ++ bpf_jit_binary_free(header); ++ prog->bpf_func = NULL; ++ prog->jited = 0; ++ prog->jited_len = 0; ++ goto out_offset; + } ++ bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; +@@ -1249,13 +1246,6 @@ out: + out_offset = -1; + + return prog; +- +-out_free: +- bpf_jit_binary_free(header); +- prog->bpf_func = NULL; +- prog->jited = 0; +- prog->jited_len = 0; +- goto out_offset; + } + + /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
*/ +--- a/arch/mips/net/bpf_jit_comp.c ++++ b/arch/mips/net/bpf_jit_comp.c +@@ -1012,8 +1012,7 @@ struct bpf_prog *bpf_int_jit_compile(str + bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]); + + /* Set as read-only exec and flush instruction cache */ +- if (bpf_jit_binary_lock_ro(header)) +- goto out_err; ++ bpf_jit_binary_lock_ro(header); + flush_icache_range((unsigned long)header, + (unsigned long)&ctx.target[ctx.jit_index]); + +--- a/arch/parisc/net/bpf_jit_core.c ++++ b/arch/parisc/net/bpf_jit_core.c +@@ -167,13 +167,7 @@ skip_init_ctx: + bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); + + if (!prog->is_func || extra_pass) { +- if (bpf_jit_binary_lock_ro(jit_data->header)) { +- bpf_jit_binary_free(jit_data->header); +- prog->bpf_func = NULL; +- prog->jited = 0; +- prog->jited_len = 0; +- goto out_offset; +- } ++ bpf_jit_binary_lock_ro(jit_data->header); + prologue_len = ctx->epilogue_offset - ctx->body_len; + for (i = 0; i < prog->len; i++) + ctx->offset[i] += prologue_len; +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -1973,11 +1973,7 @@ skip_init_ctx: + print_fn_code(jit.prg_buf, jit.size_prg); + } + if (!fp->is_func || extra_pass) { +- if (bpf_jit_binary_lock_ro(header)) { +- bpf_jit_binary_free(header); +- fp = orig_fp; +- goto free_addrs; +- } ++ bpf_jit_binary_lock_ro(header); + } else { + jit_data->header = header; + jit_data->ctx = jit; +--- a/arch/sparc/net/bpf_jit_comp_64.c ++++ b/arch/sparc/net/bpf_jit_comp_64.c +@@ -1602,11 +1602,7 @@ skip_init_ctx: + bpf_flush_icache(header, (u8 *)header + header->size); + + if (!prog->is_func || extra_pass) { +- if (bpf_jit_binary_lock_ro(header)) { +- bpf_jit_binary_free(header); +- prog = orig_prog; +- goto out_off; +- } ++ bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; +--- a/arch/x86/net/bpf_jit_comp32.c ++++ b/arch/x86/net/bpf_jit_comp32.c +@@ -2600,7 +2600,8 @@ out_image: + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, proglen, pass + 1, image); + +- if (image && !bpf_jit_binary_lock_ro(header)) { ++ if (image) { ++ bpf_jit_binary_lock_ro(header); + prog->bpf_func = (void *)image; + prog->jited = 1; + prog->jited_len = proglen; +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -853,11 +853,10 @@ static inline int __must_check bpf_prog_ + return 0; + } + +-static inline int __must_check +-bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) ++static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) + { + set_vm_flush_reset_perms(hdr); +- return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); ++ set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); + } + + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); diff --git a/queue-6.6/revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch b/queue-6.6/revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch new file mode 100644 index 00000000000..8577437e876 --- /dev/null +++ b/queue-6.6/revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch @@ -0,0 +1,55 @@ +From 6e5869bb76784baa81409feaf3d13361037ec09f Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Tue, 9 Jul 2024 11:14:07 +0200 +Subject: Revert "powerpc/bpf: rename powerpc64_jit_data to powerpc_jit_data" + +From: Greg Kroah-Hartman + +This reverts commit 10339194009208b3daae0c0b6e46ebea9bbfffcc which is +commit de04e40600ae15fa5e484be242e74aad6de7418f upstream. 
+ +It is part of a series that is reported to both break the arm64 builds +and instantly crash the powerpc systems at the first load of a bpf +program. So revert it for now until it can come back in a safe way. + +Reported-by: matoro +Reported-by: Vitaly Chikunov +Reported-by: WangYuli +Link: https://lore.kernel.org/r/5A29E00D83AB84E3+20240706031101.637601-1-wangyuli@uniontech.com +Link: https://lore.kernel.org/r/cf736c5e37489e7dc7ffd67b9de2ab47@matoro.tk +Cc: Hari Bathini +Cc: Song Liu +Cc: Michael Ellerman +Cc: Christophe Leroy +Cc: Kees Cook +Cc: Puranjay Mohan +Cc: Ilya Leoshkevich # s390x +Cc: Tiezhu Yang # LoongArch +Cc: Johan Almbladh # MIPS Part +Cc: Alexei Starovoitov +Cc: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/net/bpf_jit_comp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/powerpc/net/bpf_jit_comp.c ++++ b/arch/powerpc/net/bpf_jit_comp.c +@@ -39,7 +39,7 @@ int bpf_jit_emit_exit_insn(u32 *image, s + return 0; + } + +-struct powerpc_jit_data { ++struct powerpc64_jit_data { + struct bpf_binary_header *header; + u32 *addrs; + u8 *image; +@@ -59,7 +59,7 @@ struct bpf_prog *bpf_int_jit_compile(str + u8 *image = NULL; + u32 *code_base; + u32 *addrs; +- struct powerpc_jit_data *jit_data; ++ struct powerpc64_jit_data *jit_data; + struct codegen_context cgctx; + int pass; + int flen; diff --git a/queue-6.6/revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch b/queue-6.6/revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch new file mode 100644 index 00000000000..2542abfcbea --- /dev/null +++ b/queue-6.6/revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch @@ -0,0 +1,396 @@ +From 9944faf3599c0cbcae3b657fff9340d7075cd709 Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Tue, 9 Jul 2024 11:14:06 +0200 +Subject: Revert "powerpc/bpf: use bpf_jit_binary_pack_[alloc|finalize|free]" +
+From: Greg Kroah-Hartman +
+This reverts commit f99feda5684a87d386a0fc5de1f18c653c5f62e0 which is +commit 90d862f370b6e9de1b5d607843c5a2f9823990f3 upstream. +
+It is part of a series that is reported to both break the arm64 builds +and instantly crash the powerpc systems at the first load of a bpf +program. So revert it for now until it can come back in a safe way. +
+Reported-by: matoro +Reported-by: Vitaly Chikunov +Reported-by: WangYuli +Link: https://lore.kernel.org/r/5A29E00D83AB84E3+20240706031101.637601-1-wangyuli@uniontech.com +Link: https://lore.kernel.org/r/cf736c5e37489e7dc7ffd67b9de2ab47@matoro.tk +Cc: Hari Bathini +Cc: Song Liu +Cc: Michael Ellerman +Cc: Christophe Leroy +Cc: Kees Cook +Cc: Puranjay Mohan +Cc: Ilya Leoshkevich # s390x +Cc: Tiezhu Yang # LoongArch +Cc: Johan Almbladh # MIPS Part +Cc: Alexei Starovoitov +Cc: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/net/bpf_jit.h | 18 ++++-- + arch/powerpc/net/bpf_jit_comp.c | 106 +++++++++----------------------------- + arch/powerpc/net/bpf_jit_comp32.c | 13 ++-- + arch/powerpc/net/bpf_jit_comp64.c | 10 +-- + 4 files changed, 51 insertions(+), 96 deletions(-) + +--- a/arch/powerpc/net/bpf_jit.h ++++ b/arch/powerpc/net/bpf_jit.h +@@ -36,6 +36,9 @@ + EMIT(PPC_RAW_BRANCH(offset)); \ + } while (0) + ++/* bl (unconditional 'branch' with link) */ ++#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx))) ++ + /* "cond" here covers BO:BI fields.
*/ + #define PPC_BCC_SHORT(cond, dest) \ + do { \ +@@ -144,6 +147,12 @@ struct codegen_context { + #define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */ + #endif + ++static inline void bpf_flush_icache(void *start, void *end) ++{ ++ smp_wmb(); /* smp write barrier */ ++ flush_icache_range((unsigned long)start, (unsigned long)end); ++} ++ + static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i) + { + return ctx->seen & (1 << (31 - i)); +@@ -160,17 +169,16 @@ static inline void bpf_clear_seen_regist + } + + void bpf_jit_init_reg_mapping(struct codegen_context *ctx); +-int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func); +-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, ++int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func); ++int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass); + void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx); + void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx); + void bpf_jit_realloc_regs(struct codegen_context *ctx); + int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr); + +-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass, +- struct codegen_context *ctx, int insn_idx, +- int jmp_off, int dst_reg); ++int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx, ++ int insn_idx, int jmp_off, int dst_reg); + + #endif + +--- a/arch/powerpc/net/bpf_jit_comp.c ++++ b/arch/powerpc/net/bpf_jit_comp.c +@@ -40,12 +40,9 @@ int bpf_jit_emit_exit_insn(u32 *image, s + } + + struct powerpc_jit_data { +- /* address of rw header */ +- struct bpf_binary_header *hdr; +- /* address of ro final header */ +- struct bpf_binary_header *fhdr; ++ struct bpf_binary_header *header; + u32 *addrs; +- u8 *fimage; ++ u8 *image; + u32 proglen; + struct codegen_context ctx; + }; +@@ -66,14 +63,11 @@ struct bpf_prog *bpf_int_jit_compile(str + struct codegen_context cgctx; + int pass; + int flen; +- struct bpf_binary_header *fhdr = NULL; +- struct bpf_binary_header *hdr = NULL; ++ struct bpf_binary_header *bpf_hdr; + struct bpf_prog *org_fp = fp; + struct bpf_prog *tmp_fp; + bool bpf_blinded = false; + bool extra_pass = false; +- u8 *fimage = NULL; +- u32 *fcode_base; + u32 extable_len; + u32 fixup_len; + +@@ -103,16 +97,9 @@ struct bpf_prog *bpf_int_jit_compile(str + addrs = jit_data->addrs; + if (addrs) { + cgctx = jit_data->ctx; +- /* +- * JIT compiled to a writable location (image/code_base) first. +- * It is then moved to the readonly final location (fimage/fcode_base) +- * using instruction patching. +- */ +- fimage = jit_data->fimage; +- fhdr = jit_data->fhdr; ++ image = jit_data->image; ++ bpf_hdr = jit_data->header; + proglen = jit_data->proglen; +- hdr = jit_data->hdr; +- image = (void *)hdr + ((void *)fimage - (void *)fhdr); + extra_pass = true; + /* During extra pass, ensure index is reset before repopulating extable entries */ + cgctx.exentry_idx = 0; +@@ -132,7 +119,7 @@ struct bpf_prog *bpf_int_jit_compile(str + cgctx.stack_size = round_up(fp->aux->stack_depth, 16); + + /* Scouting faux-generate pass 0 */ +- if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) { ++ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) { + /* We hit something illegal or unsupported. 
*/ + fp = org_fp; + goto out_addrs; +@@ -147,7 +134,7 @@ struct bpf_prog *bpf_int_jit_compile(str + */ + if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) { + cgctx.idx = 0; +- if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) { ++ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) { + fp = org_fp; + goto out_addrs; + } +@@ -169,19 +156,17 @@ struct bpf_prog *bpf_int_jit_compile(str + proglen = cgctx.idx * 4; + alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len; + +- fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image, +- bpf_jit_fill_ill_insns); +- if (!fhdr) { ++ bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns); ++ if (!bpf_hdr) { + fp = org_fp; + goto out_addrs; + } + + if (extable_len) +- fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len; ++ fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len; + + skip_init_ctx: + code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); +- fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE); + + /* Code generation passes 1-2 */ + for (pass = 1; pass < 3; pass++) { +@@ -189,10 +174,8 @@ skip_init_ctx: + cgctx.idx = 0; + cgctx.alt_exit_addr = 0; + bpf_jit_build_prologue(code_base, &cgctx); +- if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass, +- extra_pass)) { +- bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size)); +- bpf_jit_binary_pack_free(fhdr, hdr); ++ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) { ++ bpf_jit_binary_free(bpf_hdr); + fp = org_fp; + goto out_addrs; + } +@@ -212,19 +195,17 @@ skip_init_ctx: + + #ifdef CONFIG_PPC64_ELF_ABI_V1 + /* Function descriptor nastiness: Address + TOC */ +- ((u64 *)image)[0] = (u64)fcode_base; ++ ((u64 *)image)[0] = (u64)code_base; + ((u64 *)image)[1] = local_paca->kernel_toc; + #endif + +- fp->bpf_func = (void *)fimage; ++ fp->bpf_func = (void *)image; + fp->jited = 1; + fp->jited_len = proglen + FUNCTION_DESCR_SIZE; + ++ bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size); + if (!fp->is_func || extra_pass) { +- if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) { +- fp = org_fp; +- goto out_addrs; +- } ++ bpf_jit_binary_lock_ro(bpf_hdr); + bpf_prog_fill_jited_linfo(fp, addrs); + out_addrs: + kfree(addrs); +@@ -234,9 +215,8 @@ out_addrs: + jit_data->addrs = addrs; + jit_data->ctx = cgctx; + jit_data->proglen = proglen; +- jit_data->fimage = fimage; +- jit_data->fhdr = fhdr; +- jit_data->hdr = hdr; ++ jit_data->image = image; ++ jit_data->header = bpf_hdr; + } + + out: +@@ -250,13 +230,12 @@ out: + * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling + * this function, as this only applies to BPF_PROBE_MEM, for now. + */ +-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass, +- struct codegen_context *ctx, int insn_idx, int jmp_off, +- int dst_reg) ++int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx, ++ int insn_idx, int jmp_off, int dst_reg) + { + off_t offset; + unsigned long pc; +- struct exception_table_entry *ex, *ex_entry; ++ struct exception_table_entry *ex; + u32 *fixup; + + /* Populate extable entries only in the last pass */ +@@ -267,16 +246,9 @@ int bpf_add_extable_entry(struct bpf_pro + WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries)) + return -EINVAL; + +- /* +- * Program is first written to image before copying to the +- * final location (fimage). 
Accordingly, update in the image first. +- * As all offsets used are relative, copying as is to the +- * final location should be alright. +- */ + pc = (unsigned long)&image[insn_idx]; +- ex = (void *)fp->aux->extable - (void *)fimage + (void *)image; + +- fixup = (void *)ex - ++ fixup = (void *)fp->aux->extable - + (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) + + (ctx->exentry_idx * BPF_FIXUP_LEN * 4); + +@@ -287,42 +259,18 @@ int bpf_add_extable_entry(struct bpf_pro + fixup[BPF_FIXUP_LEN - 1] = + PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]); + +- ex_entry = &ex[ctx->exentry_idx]; ++ ex = &fp->aux->extable[ctx->exentry_idx]; + +- offset = pc - (long)&ex_entry->insn; ++ offset = pc - (long)&ex->insn; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; +- ex_entry->insn = offset; ++ ex->insn = offset; + +- offset = (long)fixup - (long)&ex_entry->fixup; ++ offset = (long)fixup - (long)&ex->fixup; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; +- ex_entry->fixup = offset; ++ ex->fixup = offset; + + ctx->exentry_idx++; + return 0; + } +- +-void bpf_jit_free(struct bpf_prog *fp) +-{ +- if (fp->jited) { +- struct powerpc_jit_data *jit_data = fp->aux->jit_data; +- struct bpf_binary_header *hdr; +- +- /* +- * If we fail the final pass of JIT (from jit_subprogs), +- * the program may not be finalized yet. Call finalize here +- * before freeing it. +- */ +- if (jit_data) { +- bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr); +- kvfree(jit_data->addrs); +- kfree(jit_data); +- } +- hdr = bpf_jit_binary_pack_hdr(fp); +- bpf_jit_binary_pack_free(hdr, NULL); +- WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); +- } +- +- bpf_prog_unlock_free(fp); +-} +--- a/arch/powerpc/net/bpf_jit_comp32.c ++++ b/arch/powerpc/net/bpf_jit_comp32.c +@@ -200,13 +200,12 @@ void bpf_jit_build_epilogue(u32 *image, + EMIT(PPC_RAW_BLR()); + } + +-/* Relative offset needs to be calculated based on final image location */ +-int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) ++int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) + { +- s32 rel = (s32)func - (s32)(fimage + ctx->idx); ++ s32 rel = (s32)func - (s32)(image + ctx->idx); + + if (image && rel < 0x2000000 && rel >= -0x2000000) { +- EMIT(PPC_RAW_BL(rel)); ++ PPC_BL(func); + } else { + /* Load function address into r0 */ + EMIT(PPC_RAW_LIS(_R0, IMM_H(func))); +@@ -279,7 +278,7 @@ static int bpf_jit_emit_tail_call(u32 *i + } + + /* Assemble the body code between the prologue & epilogue */ +-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, ++int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass) + { + const struct bpf_insn *insn = fp->insnsi; +@@ -1010,7 +1009,7 @@ int bpf_jit_build_body(struct bpf_prog * + jmp_off += 4; + } + +- ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx, ++ ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx, + jmp_off, dst_reg); + if (ret) + return ret; +@@ -1066,7 +1065,7 @@ int bpf_jit_build_body(struct bpf_prog * + EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12)); + } + +- ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); ++ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr); + if (ret) + return ret; + +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -240,7 +240,7 @@ static int 
bpf_jit_emit_func_call_hlp(u3 + return 0; + } + +-int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) ++int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func) + { + unsigned int i, ctx_idx = ctx->idx; + +@@ -361,7 +361,7 @@ asm ( + ); + + /* Assemble the body code between the prologue & epilogue */ +-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, ++int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass) + { + enum stf_barrier_type stf_barrier = stf_barrier_type_get(); +@@ -952,8 +952,8 @@ emit_clear: + addrs[++i] = ctx->idx * 4; + + if (BPF_MODE(code) == BPF_PROBE_MEM) { +- ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, +- ctx->idx - 1, 4, dst_reg); ++ ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1, ++ 4, dst_reg); + if (ret) + return ret; + } +@@ -1007,7 +1007,7 @@ emit_clear: + if (func_addr_fixed) + ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr); + else +- ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); ++ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr); + + if (ret) + return ret; diff --git a/queue-6.6/series b/queue-6.6/series index e69de29bb2d..a7ab92b40ac 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -0,0 +1,4 @@ +revert-bpf-take-return-from-set_memory_rox-into-account-with-bpf_jit_binary_lock_ro.patch +revert-powerpc-bpf-use-bpf_jit_binary_pack_.patch +revert-powerpc-bpf-rename-powerpc64_jit_data-to-powerpc_jit_data.patch +revert-bpf-take-return-from-set_memory_ro-into-account-with-bpf_prog_lock_ro.patch -- 2.47.3