From: Greg Kroah-Hartman
Date: Tue, 16 Jul 2024 14:24:52 +0000 (+0200)
Subject: 5.10-stable patches
X-Git-Tag: v4.19.318~20
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f0bca53258af7fb40d4cfb59f83fdcff215cb598;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
---

diff --git a/queue-5.10/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch b/queue-5.10/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
new file mode 100644
index 00000000000..a8f10573ff2
--- /dev/null
+++ b/queue-5.10/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
@@ -0,0 +1,137 @@
+From b89ddf4cca43f1269093942cf5c4e457fd45c335 Mon Sep 17 00:00:00 2001
+From: Russell King
+Date: Fri, 5 Nov 2021 16:50:45 +0000
+Subject: arm64/bpf: Remove 128MB limit for BPF JIT programs
+
+From: Russell King
+
+commit b89ddf4cca43f1269093942cf5c4e457fd45c335 upstream.
+
+Commit 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module
+memory") restricts BPF JIT program allocation to a 128MB region to ensure
+BPF programs are still in branching range of each other. However this
+restriction should not apply to the aarch64 JIT, since BPF_JMP | BPF_CALL
+are implemented as a 64-bit move into a register and then a BLR instruction -
+which has the effect of being able to call anything without proximity
+limitation.
+
+The practical reason to relax this restriction on JIT memory is that 128MB of
+JIT memory can be quickly exhausted, especially where PAGE_SIZE is 64KB - one
+page is needed per program. In cases where seccomp filters are applied to
+multiple VMs on VM launch - such filters are classic BPF but converted to
+BPF - this can severely limit the number of VMs that can be launched. In a
+world where we support BPF JIT always on, turning off the JIT isn't always an
+option either.
+
+Fixes: 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module memory")
+Suggested-by: Ard Biesheuvel
+Signed-off-by: Russell King
+Signed-off-by: Daniel Borkmann
+Tested-by: Alan Maguire
+Link: https://lore.kernel.org/bpf/1636131046-5982-2-git-send-email-alan.maguire@oracle.com
+[Replace usage of in_bpf_jit() with is_bpf_text_address()]
+Signed-off-by: Puranjay Mohan
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm64/include/asm/extable.h |    9 ---------
+ arch/arm64/include/asm/memory.h  |    5 +----
+ arch/arm64/kernel/traps.c        |    2 +-
+ arch/arm64/mm/extable.c          |    3 ++-
+ arch/arm64/mm/ptdump.c           |    2 --
+ arch/arm64/net/bpf_jit_comp.c    |    7 ++-----
+ 6 files changed, 6 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/include/asm/extable.h
++++ b/arch/arm64/include/asm/extable.h
+@@ -22,15 +22,6 @@ struct exception_table_entry
+ 
+ #define ARCH_HAS_RELATIVE_EXTABLE
+ 
+-static inline bool in_bpf_jit(struct pt_regs *regs)
+-{
+-	if (!IS_ENABLED(CONFIG_BPF_JIT))
+-		return false;
+-
+-	return regs->pc >= BPF_JIT_REGION_START &&
+-	       regs->pc < BPF_JIT_REGION_END;
+-}
+-
+ #ifdef CONFIG_BPF_JIT
+ int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
+ 			      struct pt_regs *regs);
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -44,11 +44,8 @@
+ #define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
+ #define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
+ #define KIMAGE_VADDR		(MODULES_END)
+-#define BPF_JIT_REGION_START	(KASAN_SHADOW_END)
+-#define BPF_JIT_REGION_SIZE	(SZ_128M)
+-#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+ #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
+-#define MODULES_VADDR		(BPF_JIT_REGION_END)
++#define MODULES_VADDR		(_PAGE_END(VA_BITS_MIN))
+ #define MODULES_VSIZE		(SZ_128M)
+ #define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
+ #define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -912,7 +912,7 @@ static struct break_hook bug_break_hook
+ static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+ {
+ 	pr_err("%s generated an invalid instruction at %pS!\n",
+-		in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
++		"Kernel text patching",
+ 		(void *)instruction_pointer(regs));
+ 
+ 	/* We cannot handle this */
+--- a/arch/arm64/mm/extable.c
++++ b/arch/arm64/mm/extable.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/extable.h>
+ #include <linux/uaccess.h>
++#include <linux/filter.h>
+ 
+ int fixup_exception(struct pt_regs *regs)
+ {
+@@ -14,7 +15,7 @@ int fixup_exception(struct pt_regs *regs
+ 	if (!fixup)
+ 		return 0;
+ 
+-	if (in_bpf_jit(regs))
++	if (is_bpf_text_address(regs->pc))
+ 		return arm64_bpf_fixup_exception(fixup, regs);
+ 
+ 	regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
+--- a/arch/arm64/mm/ptdump.c
++++ b/arch/arm64/mm/ptdump.c
+@@ -41,8 +41,6 @@ static struct addr_marker address_marker
+ 	{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
+ 	{ KASAN_SHADOW_END, "Kasan shadow end" },
+ #endif
+-	{ BPF_JIT_REGION_START, "BPF start" },
+-	{ BPF_JIT_REGION_END, "BPF end" },
+ 	{ MODULES_VADDR, "Modules start" },
+ 	{ MODULES_END, "Modules end" },
+ 	{ VMALLOC_START, "vmalloc() area" },
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1145,15 +1145,12 @@ out:
+ 
+ u64 bpf_jit_alloc_exec_limit(void)
+ {
+-	return BPF_JIT_REGION_SIZE;
++	return VMALLOC_END - VMALLOC_START;
+ }
+ 
+ void *bpf_jit_alloc_exec(unsigned long size)
+ {
+-	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+-				    BPF_JIT_REGION_END, GFP_KERNEL,
+-				    PAGE_KERNEL, 0, NUMA_NO_NODE,
+-				    __builtin_return_address(0));
++	return vmalloc(size);
+ }
+ 
+ void bpf_jit_free_exec(void *addr)
diff --git a/queue-5.10/series b/queue-5.10/series
index 5cb6b66d58d..82b3c4d84dd 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -97,3 +97,4 @@ ipv6-annotate-data-races-around-cnf.disable_ipv6.patch
 ipv6-prevent-null-dereference-in-ip6_output.patch
 bpf-allow-reads-from-uninit-stack.patch
 nilfs2-fix-kernel-bug-on-rename-operation-of-broken-directory.patch
+arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch