git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.1
author    Sasha Levin <sashal@kernel.org>  Fri, 25 Oct 2024 10:20:39 +0000 (06:20 -0400)
committer Sasha Levin <sashal@kernel.org>  Fri, 25 Oct 2024 10:20:39 +0000 (06:20 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.1/jfs-fix-sanity-check-in-dbmount.patch [new file with mode: 0644]
queue-6.1/loongarch-add-support-to-clone-a-time-namespace.patch [new file with mode: 0644]
queue-6.1/loongarch-don-t-crash-in-stack_top-for-tasks-without.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/tracing-consider-the-null-character-when-validating-.patch [new file with mode: 0644]

diff --git a/queue-6.1/jfs-fix-sanity-check-in-dbmount.patch b/queue-6.1/jfs-fix-sanity-check-in-dbmount.patch
new file mode 100644 (file)
index 0000000..a9c1ca0
--- /dev/null
@@ -0,0 +1,35 @@
+From 03110c4e7d004bff32b29cfe17b291457029ee77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 09:40:37 -0500
+Subject: jfs: Fix sanity check in dbMount
+
+From: Dave Kleikamp <dave.kleikamp@oracle.com>
+
+[ Upstream commit 67373ca8404fe57eb1bb4b57f314cff77ce54932 ]
+
+MAXAG is a legitimate value for bmp->db_numag, so the sanity check should
+reject only values strictly greater than MAXAG, not MAXAG itself.
+
+Fixes: e63866a47556 ("jfs: fix out-of-bounds in dbNextAG() and diAlloc()")
+
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jfs/jfs_dmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 384aa6dfdb95e..a6b1d748df16b 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
+       }
+       bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+-      if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
++      if (!bmp->db_numag || bmp->db_numag > MAXAG) {
+               err = -EINVAL;
+               goto err_release_metapage;
+       }
+-- 
+2.43.0
+
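
The off-by-one being fixed is easiest to see in isolation: db_numag counts the
filesystem's allocation groups and MAXAG is the largest count JFS supports, so
the valid range is 1..MAXAG inclusive. Below is a minimal user-space sketch of
the corrected bound; the MAXAG value is an illustrative placeholder, not taken
from the JFS headers.

#include <stdio.h>

#define MAXAG 128	/* illustrative placeholder; JFS defines its own MAXAG */

/* Mirrors the corrected dbMount() check: zero and anything above MAXAG are
 * rejected, while MAXAG itself is now accepted. */
static int check_numag(unsigned int db_numag)
{
	if (!db_numag || db_numag > MAXAG)
		return -1;	/* dbMount() returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("MAXAG     -> %s\n", check_numag(MAXAG)     ? "rejected" : "accepted");
	printf("MAXAG + 1 -> %s\n", check_numag(MAXAG + 1) ? "rejected" : "accepted");
	printf("0         -> %s\n", check_numag(0)         ? "rejected" : "accepted");
	return 0;
}
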
diff --git a/queue-6.1/loongarch-add-support-to-clone-a-time-namespace.patch b/queue-6.1/loongarch-add-support-to-clone-a-time-namespace.patch
new file mode 100644 (file)
index 0000000..9756253
--- /dev/null
@@ -0,0 +1,360 @@
+From 1ca1890bfd2afb8ee256b1afcf9f1f106c2b54d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jun 2023 20:58:43 +0800
+Subject: LoongArch: Add support to clone a time namespace
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit aa5e65dc0818bbf676bf06927368ec46867778fd ]
+
+We can see that "Time namespaces are not supported" on LoongArch:
+
+(1) clone3 test
+  # cd tools/testing/selftests/clone3 && make && ./clone3
+  ...
+  # Time namespaces are not supported
+  ok 18 # SKIP Skipping clone3() with CLONE_NEWTIME
+  # Totals: pass:17 fail:0 xfail:0 xpass:0 skip:1 error:0
+
+(2) timens test
+  # cd tools/testing/selftests/timens && make && ./timens
+  ...
+  1..0 # SKIP Time namespaces are not supported
+
+On LoongArch the current kernel does not support CONFIG_TIME_NS, which
+depends on GENERIC_VDSO_TIME_NS. Select GENERIC_VDSO_TIME_NS so that
+CONFIG_TIME_NS can be enabled and kernel/time/namespace.c gets built.
+
+Additionally, define the arch-dependent functions needed by the time
+namespace, such as __arch_get_timens_vdso_data(), arch_get_vdso_data()
+and vdso_join_timens().
+
+At the same time, change the vvar layout to use one page for the generic
+vdso data, another page for the timens vdso data, and
+LOONGARCH_VDSO_DATA_SIZE (which may exceed one page if it is expanded in
+the future) for the LoongArch vdso data. Finally, add the callback
+function vvar_fault() and modify stack_top().
+
+With this patch under CONFIG_TIME_NS:
+
+(1) clone3 test
+  # cd tools/testing/selftests/clone3 && make && ./clone3
+  ...
+  ok 18 [739] Result (0) matches expectation (0)
+  # Totals: pass:18 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+(2) timens test
+  # cd tools/testing/selftests/timens && make && ./timens
+  ...
+  # Totals: pass:10 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Stable-dep-of: 134475a9ab84 ("LoongArch: Don't crash in stack_top() for tasks without vDSO")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/Kconfig                        |  1 +
+ arch/loongarch/include/asm/page.h             |  1 +
+ .../loongarch/include/asm/vdso/gettimeofday.h |  9 +-
+ arch/loongarch/include/asm/vdso/vdso.h        | 32 +++++-
+ arch/loongarch/kernel/process.c               |  2 +-
+ arch/loongarch/kernel/vdso.c                  | 98 ++++++++++++++++---
+ arch/loongarch/vdso/vgetcpu.c                 |  2 +-
+ 7 files changed, 121 insertions(+), 24 deletions(-)
+
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index fa3171f563274..f4ba3638b76a8 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -78,6 +78,7 @@ config LOONGARCH
+       select GENERIC_SCHED_CLOCK
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_TIME_VSYSCALL
++      select GENERIC_VDSO_TIME_NS
+       select GPIOLIB
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_MMAP_RND_BITS if MMU
+diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
+index 53f284a961823..bbac81dd73788 100644
+--- a/arch/loongarch/include/asm/page.h
++++ b/arch/loongarch/include/asm/page.h
+@@ -81,6 +81,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
+ #define __va(x)               ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+ #define pfn_to_kaddr(pfn)     __va((pfn) << PAGE_SHIFT)
++#define sym_to_pfn(x)         __phys_to_pfn(__pa_symbol(x))
+ #ifdef CONFIG_FLATMEM
+diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
+index 7b2cd37641e2a..89e6b222c2f2d 100644
+--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
++++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
+@@ -91,9 +91,16 @@ static inline bool loongarch_vdso_hres_capable(void)
+ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+ {
+-      return get_vdso_data();
++      return (const struct vdso_data *)get_vdso_data();
+ }
++#ifdef CONFIG_TIME_NS
++static __always_inline
++const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
++{
++      return (const struct vdso_data *)(get_vdso_data() + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE);
++}
++#endif
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
+diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
+index 3b55d32a0619c..5a12309d9fb55 100644
+--- a/arch/loongarch/include/asm/vdso/vdso.h
++++ b/arch/loongarch/include/asm/vdso/vdso.h
+@@ -16,10 +16,33 @@ struct vdso_pcpu_data {
+ struct loongarch_vdso_data {
+       struct vdso_pcpu_data pdata[NR_CPUS];
+-      struct vdso_data data[CS_BASES]; /* Arch-independent data */
+ };
+-#define VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
++/*
++ * The layout of vvar:
++ *
++ *                      high
++ * +---------------------+--------------------------+
++ * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE |
++ * +---------------------+--------------------------+
++ * |  time-ns vdso data  |        PAGE_SIZE         |
++ * +---------------------+--------------------------+
++ * |  generic vdso data  |        PAGE_SIZE         |
++ * +---------------------+--------------------------+
++ *                      low
++ */
++#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
++#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT)
++
++enum vvar_pages {
++      VVAR_GENERIC_PAGE_OFFSET,
++      VVAR_TIMENS_PAGE_OFFSET,
++      VVAR_LOONGARCH_PAGES_START,
++      VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1,
++      VVAR_NR_PAGES,
++};
++
++#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
+ static inline unsigned long get_vdso_base(void)
+ {
+@@ -34,10 +57,9 @@ static inline unsigned long get_vdso_base(void)
+       return addr;
+ }
+-static inline const struct vdso_data *get_vdso_data(void)
++static inline unsigned long get_vdso_data(void)
+ {
+-      return (const struct vdso_data *)(get_vdso_base()
+-                      - VDSO_DATA_SIZE + SMP_CACHE_BYTES * NR_CPUS);
++      return get_vdso_base() - VVAR_SIZE;
+ }
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index 1259bc3129790..51176e5ecee59 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -273,7 +273,7 @@ unsigned long stack_top(void)
+       /* Space for the VDSO & data page */
+       top -= PAGE_ALIGN(current->thread.vdso->size);
+-      top -= PAGE_SIZE;
++      top -= VVAR_SIZE;
+       /* Space to randomize the VDSO base */
+       if (current->flags & PF_RANDOMIZE)
+diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
+index 8c9826062652e..59aa9dd466e84 100644
+--- a/arch/loongarch/kernel/vdso.c
++++ b/arch/loongarch/kernel/vdso.c
+@@ -14,6 +14,7 @@
+ #include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/time_namespace.h>
+ #include <linux/timekeeper_internal.h>
+ #include <asm/page.h>
+@@ -26,12 +27,17 @@ extern char vdso_start[], vdso_end[];
+ /* Kernel-provided data used by the VDSO. */
+ static union {
+-      u8 page[VDSO_DATA_SIZE];
++      u8 page[PAGE_SIZE];
++      struct vdso_data data[CS_BASES];
++} generic_vdso_data __page_aligned_data;
++
++static union {
++      u8 page[LOONGARCH_VDSO_DATA_SIZE];
+       struct loongarch_vdso_data vdata;
+ } loongarch_vdso_data __page_aligned_data;
+ static struct page *vdso_pages[] = { NULL };
+-struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data;
++struct vdso_data *vdso_data = generic_vdso_data.data;
+ struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
+ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+@@ -41,6 +47,43 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
+       return 0;
+ }
++static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
++                           struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      unsigned long pfn;
++      struct page *timens_page = find_timens_vvar_page(vma);
++
++      switch (vmf->pgoff) {
++      case VVAR_GENERIC_PAGE_OFFSET:
++              if (!timens_page)
++                      pfn = sym_to_pfn(vdso_data);
++              else
++                      pfn = page_to_pfn(timens_page);
++              break;
++#ifdef CONFIG_TIME_NS
++      case VVAR_TIMENS_PAGE_OFFSET:
++              /*
++               * If a task belongs to a time namespace then a namespace specific
++               * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
++               * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
++               * See also the comment near timens_setup_vdso_data().
++               */
++              if (!timens_page)
++                      return VM_FAULT_SIGBUS;
++              else
++                      pfn = sym_to_pfn(vdso_data);
++              break;
++#endif /* CONFIG_TIME_NS */
++      case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
++              pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
++              break;
++      default:
++              return VM_FAULT_SIGBUS;
++      }
++
++      return vmf_insert_pfn(vma, vmf->address, pfn);
++}
++
+ struct loongarch_vdso_info vdso_info = {
+       .vdso = vdso_start,
+       .size = PAGE_SIZE,
+@@ -51,6 +94,7 @@ struct loongarch_vdso_info vdso_info = {
+       },
+       .data_mapping = {
+               .name = "[vvar]",
++              .fault = vvar_fault,
+       },
+       .offset_sigreturn = vdso_offset_sigreturn,
+ };
+@@ -73,6 +117,37 @@ static int __init init_vdso(void)
+ }
+ subsys_initcall(init_vdso);
++#ifdef CONFIG_TIME_NS
++struct vdso_data *arch_get_vdso_data(void *vvar_page)
++{
++      return (struct vdso_data *)(vvar_page);
++}
++
++/*
++ * The vvar mapping contains data for a specific time namespace, so when a
++ * task changes namespace we must unmap its vvar data for the old namespace.
++ * Subsequent faults will map in data for the new namespace.
++ *
++ * For more details see timens_setup_vdso_data().
++ */
++int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
++{
++      struct mm_struct *mm = task->mm;
++      struct vm_area_struct *vma;
++
++      VMA_ITERATOR(vmi, mm, 0);
++
++      mmap_read_lock(mm);
++      for_each_vma(vmi, vma) {
++              if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
++                      zap_vma_pages(vma);
++      }
++      mmap_read_unlock(mm);
++
++      return 0;
++}
++#endif
++
+ static unsigned long vdso_base(void)
+ {
+       unsigned long base = STACK_TOP;
+@@ -88,7 +163,7 @@ static unsigned long vdso_base(void)
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+       int ret;
+-      unsigned long vvar_size, size, data_addr, vdso_addr;
++      unsigned long size, data_addr, vdso_addr;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct loongarch_vdso_info *info = current->thread.vdso;
+@@ -100,32 +175,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+        * Determine total area size. This includes the VDSO data itself
+        * and the data pages.
+        */
+-      vvar_size = VDSO_DATA_SIZE;
+-      size = vvar_size + info->size;
++      size = VVAR_SIZE + info->size;
+       data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+       if (IS_ERR_VALUE(data_addr)) {
+               ret = data_addr;
+               goto out;
+       }
+-      vdso_addr = data_addr + VDSO_DATA_SIZE;
+-      vma = _install_special_mapping(mm, data_addr, vvar_size,
+-                                     VM_READ | VM_MAYREAD,
++      vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
++                                     VM_READ | VM_MAYREAD | VM_PFNMAP,
+                                      &info->data_mapping);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out;
+       }
+-      /* Map VDSO data page. */
+-      ret = remap_pfn_range(vma, data_addr,
+-                            virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT,
+-                            vvar_size, PAGE_READONLY);
+-      if (ret)
+-              goto out;
+-
+-      /* Map VDSO code page. */
++      vdso_addr = data_addr + VVAR_SIZE;
+       vma = _install_special_mapping(mm, vdso_addr, info->size,
+                                      VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+                                      &info->code_mapping);
+diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
+index e02e775f53608..9e445be39763a 100644
+--- a/arch/loongarch/vdso/vgetcpu.c
++++ b/arch/loongarch/vdso/vgetcpu.c
+@@ -21,7 +21,7 @@ static __always_inline int read_cpu_id(void)
+ static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
+ {
+-      return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
++      return (struct vdso_pcpu_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START * PAGE_SIZE);
+ }
+ extern
+-- 
+2.43.0
+
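
With GENERIC_VDSO_TIME_NS selected and CONFIG_TIME_NS enabled, time namespaces
can also be exercised outside the selftests. The following is a minimal sketch,
assuming a kernel built with CONFIG_TIME_NS and CAP_SYS_ADMIN (e.g. run as
root); it creates a new time namespace and offsets CLOCK_MONOTONIC by one day
for children of the calling process.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec ts;
	FILE *f;
	pid_t pid;

	/* Fails (typically with EINVAL) on kernels without time-namespace support. */
	if (unshare(CLONE_NEWTIME)) {
		perror("unshare(CLONE_NEWTIME)");
		return 1;
	}

	/* Offsets can only be written before the first process enters the new
	 * namespace; they apply to children, not to the writer itself. */
	f = fopen("/proc/self/timens_offsets", "w");
	if (!f || fprintf(f, "monotonic 86400 0\n") < 0) {
		perror("/proc/self/timens_offsets");
		return 1;
	}
	fclose(f);

	pid = fork();
	if (pid == 0) {		/* the child runs in the new time namespace */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		printf("child  CLOCK_MONOTONIC: %lld s\n", (long long)ts.tv_sec);
		_exit(0);
	}
	waitpid(pid, NULL, 0);

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* parent stays in the old namespace */
	printf("parent CLOCK_MONOTONIC: %lld s\n", (long long)ts.tv_sec);
	return 0;
}
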
diff --git a/queue-6.1/loongarch-don-t-crash-in-stack_top-for-tasks-without.patch b/queue-6.1/loongarch-don-t-crash-in-stack_top-for-tasks-without.patch
new file mode 100644 (file)
index 0000000..e09e3f6
--- /dev/null
@@ -0,0 +1,67 @@
+From b44ed01e4b5c6dc6297a7a0dde8e495ed8e9e878 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Oct 2024 22:11:19 +0800
+Subject: LoongArch: Don't crash in stack_top() for tasks without vDSO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+
+[ Upstream commit 134475a9ab8487527238d270639a8cb74c10aab2 ]
+
+Not all tasks have a vDSO mapped; kthreads, for example, never do. If such
+a task ever ends up calling stack_top(), it will dereference the NULL vdso
+pointer and crash.
+
+This can happen, for example, when using kunit:
+
+       [<9000000000203874>] stack_top+0x58/0xa8
+       [<90000000002956cc>] arch_pick_mmap_layout+0x164/0x220
+       [<90000000003c284c>] kunit_vm_mmap_init+0x108/0x12c
+       [<90000000003c1fbc>] __kunit_add_resource+0x38/0x8c
+       [<90000000003c2704>] kunit_vm_mmap+0x88/0xc8
+       [<9000000000410b14>] usercopy_test_init+0xbc/0x25c
+       [<90000000003c1db4>] kunit_try_run_case+0x5c/0x184
+       [<90000000003c3d54>] kunit_generic_run_threadfn_adapter+0x24/0x48
+       [<900000000022e4bc>] kthread+0xc8/0xd4
+       [<9000000000200ce8>] ret_from_kernel_thread+0xc/0xa4
+
+Fixes: 803b0fc5c3f2 ("LoongArch: Add process management")
+Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/process.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index 51176e5ecee59..4561bc81c0639 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -271,13 +271,15 @@ unsigned long stack_top(void)
+ {
+       unsigned long top = TASK_SIZE & PAGE_MASK;
+-      /* Space for the VDSO & data page */
+-      top -= PAGE_ALIGN(current->thread.vdso->size);
+-      top -= VVAR_SIZE;
+-
+-      /* Space to randomize the VDSO base */
+-      if (current->flags & PF_RANDOMIZE)
+-              top -= VDSO_RANDOMIZE_SIZE;
++      if (current->thread.vdso) {
++              /* Space for the VDSO & data page */
++              top -= PAGE_ALIGN(current->thread.vdso->size);
++              top -= VVAR_SIZE;
++
++              /* Space to randomize the VDSO base */
++              if (current->flags & PF_RANDOMIZE)
++                      top -= VDSO_RANDOMIZE_SIZE;
++      }
+       return top;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.1/series b/queue-6.1/series
index 9a128d62bcdf08a31b2fc6a41ae13a40dbd08669..6c43245276318e04db30a2ceced8dc864bd25d21 100644 (file)
--- a/queue-6.1/series
@@ -79,3 +79,7 @@ udf-refactor-udf_current_aext-to-handle-error.patch
 udf-fix-uninit-value-use-in-udf_get_fileshortad.patch
 asoc-qcom-sm8250-add-qrb4210-rb2-sndcard-compatible-.patch
 platform-x86-dell-sysman-add-support-for-alienware-p.patch
+loongarch-add-support-to-clone-a-time-namespace.patch
+loongarch-don-t-crash-in-stack_top-for-tasks-without.patch
+jfs-fix-sanity-check-in-dbmount.patch
+tracing-consider-the-null-character-when-validating-.patch
diff --git a/queue-6.1/tracing-consider-the-null-character-when-validating-.patch b/queue-6.1/tracing-consider-the-null-character-when-validating-.patch
new file mode 100644 (file)
index 0000000..dca6f70
--- /dev/null
@@ -0,0 +1,42 @@
+From 7d76c50c18c4b9112652e91a23c590c359569059 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 15:47:24 +0100
+Subject: tracing: Consider the NULL character when validating the event length
+
+From: Leo Yan <leo.yan@arm.com>
+
+[ Upstream commit 0b6e2e22cb23105fcb171ab92f0f7516c69c8471 ]
+
+strlen() returns the string length excluding the terminating null byte. If
+the string length equals the maximum buffer length, the buffer has no room
+left for the terminating NULL character.
+
+This commit checks for that condition and returns a failure when it occurs.
+
+Link: https://lore.kernel.org/all/20241007144724.920954-1-leo.yan@arm.com/
+
+Fixes: dec65d79fd26 ("tracing/probe: Check event name length correctly")
+Signed-off-by: Leo Yan <leo.yan@arm.com>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_probe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index eef9806bb9b14..ba48b5e270e1f 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -265,7 +265,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
+               }
+               trace_probe_log_err(offset, NO_EVENT_NAME);
+               return -EINVAL;
+-      } else if (len > MAX_EVENT_NAME_LEN) {
++      } else if (len >= MAX_EVENT_NAME_LEN) {
+               trace_probe_log_err(offset, EVENT_TOO_LONG);
+               return -EINVAL;
+       }
+-- 
+2.43.0
+
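
The boundary guarded against here is the classic "no room for the NUL" case: an
event name whose strlen() equals the size of the destination buffer fills it
completely, leaving no byte for the terminator. A small user-space sketch of
the same check follows; MAX_EVENT_NAME_LEN is an assumed illustrative value,
not the kernel's definition.

#include <stdio.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 64	/* illustrative placeholder */

/* Mirrors the corrected check: names of MAX_EVENT_NAME_LEN characters or more
 * are rejected, since a MAX_EVENT_NAME_LEN-byte buffer cannot hold them plus
 * the trailing NUL. */
static int validate_event_name_len(const char *name)
{
	size_t len = strlen(name);	/* excludes the terminating NUL */

	if (len >= MAX_EVENT_NAME_LEN)
		return -1;		/* EVENT_TOO_LONG / -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	char name[MAX_EVENT_NAME_LEN + 1];

	memset(name, 'a', MAX_EVENT_NAME_LEN);
	name[MAX_EVENT_NAME_LEN] = '\0';	/* strlen(name) == MAX_EVENT_NAME_LEN */
	printf("len == MAX   -> %s\n", validate_event_name_len(name) ? "rejected" : "accepted");

	name[MAX_EVENT_NAME_LEN - 1] = '\0';	/* strlen(name) == MAX_EVENT_NAME_LEN - 1 */
	printf("len == MAX-1 -> %s\n", validate_event_name_len(name) ? "rejected" : "accepted");
	return 0;
}
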