5.15-stable patches
author  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Jan 2022 16:14:13 +0000 (17:14 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Jan 2022 16:14:13 +0000 (17:14 +0100)
added patches:
arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch

queue-5.15/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch b/queue-5.15/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
new file mode 100644
index 0000000..52040ce
--- /dev/null
+++ b/queue-5.15/arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
@@ -0,0 +1,144 @@
+From b89ddf4cca43f1269093942cf5c4e457fd45c335 Mon Sep 17 00:00:00 2001
+From: Russell King <russell.king@oracle.com>
+Date: Fri, 5 Nov 2021 16:50:45 +0000
+Subject: arm64/bpf: Remove 128MB limit for BPF JIT programs
+
+From: Russell King <russell.king@oracle.com>
+
+commit b89ddf4cca43f1269093942cf5c4e457fd45c335 upstream.
+
+Commit 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module
+memory") restricts BPF JIT program allocation to a 128MB region to ensure
+BPF programs are still in branching range of each other. However, this
+restriction should not apply to the aarch64 JIT, since BPF_JMP | BPF_CALL is
+implemented as a 64-bit move of the target address into a register followed by
+a BLR instruction - which can call any address, with no proximity limitation.
+
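+For reference, the call sequence described above is what the arm64 JIT emits
+for a BPF call - roughly the following (a sketch only; helper names such as
+emit_call() and emit_addr_mov_i64() follow the upstream
+arch/arm64/net/bpf_jit_comp.c of this era and may differ between kernel
+versions):
+
+	static void emit_call(u64 target, struct jit_ctx *ctx)
+	{
+		u8 tmp = bpf2a64[TMP_REG_1];
+
+		/* 64-bit move of the call target into a scratch register */
+		emit_addr_mov_i64(tmp, target, ctx);
+		/* indirect branch-with-link - no range restriction on target */
+		emit(A64_BLR(tmp), ctx);
+	}
+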
+The practical reason to relax this restriction on JIT memory is that 128MB of
+JIT memory can be quickly exhausted, especially where PAGE_SIZE is 64KB - one
+page is needed per program. In cases where seccomp filters are applied to
+multiple VMs at launch - such filters are written as classic BPF but are
+converted to eBPF and then JIT-compiled - this can severely limit the number
+of VMs that can be launched. And where the JIT is built as always-on
+(BPF_JIT_ALWAYS_ON), turning it off isn't an option either.
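+(For scale: with 64KB pages and at least one page per program, a 128MB region
+holds at most 128MB / 64KB = 2048 JIT-ed programs.)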
+
+Fixes: 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module memory")
+Suggested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Russell King <russell.king@oracle.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Alan Maguire <alan.maguire@oracle.com>
+Link: https://lore.kernel.org/bpf/1636131046-5982-2-git-send-email-alan.maguire@oracle.com
+Reviewed-by: Tom Saeger <tom.saeger@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/extable.h |    9 ---------
+ arch/arm64/include/asm/memory.h  |    5 +----
+ arch/arm64/kernel/traps.c        |    2 +-
+ arch/arm64/mm/extable.c          |   13 +++++++++----
+ arch/arm64/mm/ptdump.c           |    2 --
+ arch/arm64/net/bpf_jit_comp.c    |    7 ++-----
+ 6 files changed, 13 insertions(+), 25 deletions(-)
+
+--- a/arch/arm64/include/asm/extable.h
++++ b/arch/arm64/include/asm/extable.h
+@@ -22,15 +22,6 @@ struct exception_table_entry
+ #define ARCH_HAS_RELATIVE_EXTABLE
+-static inline bool in_bpf_jit(struct pt_regs *regs)
+-{
+-      if (!IS_ENABLED(CONFIG_BPF_JIT))
+-              return false;
+-
+-      return regs->pc >= BPF_JIT_REGION_START &&
+-             regs->pc < BPF_JIT_REGION_END;
+-}
+-
+ #ifdef CONFIG_BPF_JIT
+ int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
+                             struct pt_regs *regs);
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -44,11 +44,8 @@
+ #define _PAGE_OFFSET(va)      (-(UL(1) << (va)))
+ #define PAGE_OFFSET           (_PAGE_OFFSET(VA_BITS))
+ #define KIMAGE_VADDR          (MODULES_END)
+-#define BPF_JIT_REGION_START  (_PAGE_END(VA_BITS_MIN))
+-#define BPF_JIT_REGION_SIZE   (SZ_128M)
+-#define BPF_JIT_REGION_END    (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+ #define MODULES_END           (MODULES_VADDR + MODULES_VSIZE)
+-#define MODULES_VADDR         (BPF_JIT_REGION_END)
++#define MODULES_VADDR         (_PAGE_END(VA_BITS_MIN))
+ #define MODULES_VSIZE         (SZ_128M)
+ #define VMEMMAP_START         (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
+ #define VMEMMAP_END           (VMEMMAP_START + VMEMMAP_SIZE)
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -988,7 +988,7 @@ static struct break_hook bug_break_hook
+ static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+ {
+       pr_err("%s generated an invalid instruction at %pS!\n",
+-              in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
++              "Kernel text patching",
+               (void *)instruction_pointer(regs));
+       /* We cannot handle this */
+--- a/arch/arm64/mm/extable.c
++++ b/arch/arm64/mm/extable.c
+@@ -9,14 +9,19 @@
+ int fixup_exception(struct pt_regs *regs)
+ {
+       const struct exception_table_entry *fixup;
++      unsigned long addr;
+-      fixup = search_exception_tables(instruction_pointer(regs));
+-      if (!fixup)
+-              return 0;
++      addr = instruction_pointer(regs);
+-      if (in_bpf_jit(regs))
++      /* Search the BPF tables first, these are formatted differently */
++      fixup = search_bpf_extables(addr);
++      if (fixup)
+               return arm64_bpf_fixup_exception(fixup, regs);
++      fixup = search_exception_tables(addr);
++      if (!fixup)
++              return 0;
++
+       regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
+       return 1;
+ }
+--- a/arch/arm64/mm/ptdump.c
++++ b/arch/arm64/mm/ptdump.c
+@@ -41,8 +41,6 @@ static struct addr_marker address_marker
+       { 0 /* KASAN_SHADOW_START */,   "Kasan shadow start" },
+       { KASAN_SHADOW_END,             "Kasan shadow end" },
+ #endif
+-      { BPF_JIT_REGION_START,         "BPF start" },
+-      { BPF_JIT_REGION_END,           "BPF end" },
+       { MODULES_VADDR,                "Modules start" },
+       { MODULES_END,                  "Modules end" },
+       { VMALLOC_START,                "vmalloc() area" },
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1138,15 +1138,12 @@ out:
+ u64 bpf_jit_alloc_exec_limit(void)
+ {
+-      return BPF_JIT_REGION_SIZE;
++      return VMALLOC_END - VMALLOC_START;
+ }
+ void *bpf_jit_alloc_exec(unsigned long size)
+ {
+-      return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+-                                  BPF_JIT_REGION_END, GFP_KERNEL,
+-                                  PAGE_KERNEL, 0, NUMA_NO_NODE,
+-                                  __builtin_return_address(0));
++      return vmalloc(size);
+ }
+ void bpf_jit_free_exec(void *addr)
diff --git a/queue-5.15/series b/queue-5.15/series
index 2eb031c3c8b1e469883b1f27a3e9910fbcd70115..435f1730dc617daedab1eb2e60112a8fbbc3d0a5 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -9,3 +9,4 @@ memcg-better-bounds-on-the-memcg-stats-updates.patch
 rcu-tighten-rcu_advance_cbs_nowake-checks.patch
 select-fix-indefinitely-sleeping-task-in-poll_schedule_timeout.patch
 drm-amdgpu-use-correct-viewport_dimension-for-dcn2.patch
+arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch