--- /dev/null
+From deller@gmx.de Thu Aug 31 12:49:41 2023
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 28 Aug 2023 23:55:55 +0200
+Subject: io_uring/parisc: Adjust pgoff in io_uring mmap() for parisc
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>, linux-parisc@vger.kernel.org, Jens Axboe <axboe@kernel.dk>
+Cc: Vidra.Jonas@seznam.cz, Sam James <sam@gentoo.org>, John David Anglin <dave.anglin@bell.net>
+Message-ID: <ZO0X64s72JpFJnRM@p100>
+Content-Disposition: inline
+
+From: Helge Deller <deller@gmx.de>
+
+Vidra Jonas reported issues with libuv on parisc, which then trigger
+build errors with cmake. Debugging shows that those issues stem from
+io_uring().
+
+I was not able to easily pull in the upstream commits directly, so
+here is, IMHO, the least invasive manual backport of the following
+upstream commits to fix the cache aliasing issues with io_uring on
+parisc in kernel 6.1:
+
+56675f8b9f9b ("io_uring/parisc: Adjust pgoff in io_uring mmap() for parisc")
+32832a407a71 ("io_uring: Fix io_uring mmap() by using architecture-provided get_unmapped_area()")
+d808459b2e31 ("io_uring: Adjust mapping wrt architecture aliasing requirements")
+
+With this patch, kernel 6.1 has all relevant mmap changes and is
+identical to kernel 6.5 with regard to mmap() in io_uring.
+
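+For illustration only (not part of the backported commits): the
+mappings affected are the SQ/CQ rings and the SQE array, which
+userspace mmap()s from the io_uring fd. A minimal sketch of such a
+mapping follows; note the NULL hint, since with this patch the kernel
+rejects user-chosen addresses and picks a colour-aligned one itself:
+
+	#include <linux/io_uring.h>
+	#include <sys/mman.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		struct io_uring_params p = { 0 };
+		int fd = syscall(__NR_io_uring_setup, 8, &p);
+
+		/* addr hint must be NULL: io_uring_mmu_get_unmapped_area()
+		 * returns -EINVAL for a user-supplied address */
+		void *sq = mmap(NULL,
+				p.sq_off.array + p.sq_entries * sizeof(__u32),
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED | MAP_POPULATE, fd,
+				IORING_OFF_SQ_RING);
+		return sq == MAP_FAILED;
+	}
+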
+Signed-off-by: Helge Deller <deller@gmx.de>
+Reported-by: Vidra.Jonas@seznam.cz
+Link: https://lore.kernel.org/linux-parisc/520.NvTX.6mXZpmfh4Ju.1awpAS@seznam.cz/
+Cc: Sam James <sam@gentoo.org>
+Cc: John David Anglin <dave.anglin@bell.net>
+Cc: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ io_uring/io_uring.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -72,6 +72,7 @@
+ #include <linux/io_uring.h>
+ #include <linux/audit.h>
+ #include <linux/security.h>
++#include <asm/shmparam.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/io_uring.h>
+@@ -3110,6 +3111,49 @@ static __cold int io_uring_mmap(struct f
+ return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
+ }
+
++static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags)
++{
++ void *ptr;
++
++ /*
++	 * Do not allow mapping to a user-provided address, to avoid breaking
++	 * the aliasing rules. Userspace is not able to guess the offset address
++	 * of the kernel's kmalloc()ed memory area.
++ */
++ if (addr)
++ return -EINVAL;
++
++ ptr = io_uring_validate_mmap_request(filp, pgoff, len);
++ if (IS_ERR(ptr))
++ return -ENOMEM;
++
++ /*
++ * Some architectures have strong cache aliasing requirements.
++ * For such architectures we need a coherent mapping which aliases
++ * kernel memory *and* userspace memory. To achieve that:
++ * - use a NULL file pointer to reference physical memory, and
++ * - use the kernel virtual address of the shared io_uring context
++ * (instead of the userspace-provided address, which has to be 0UL
++ * anyway).
++ * - use the same pgoff which the get_unmapped_area() uses to
++ * calculate the page colouring.
++ * For architectures without such aliasing requirements, the
++ * architecture will return any suitable mapping because addr is 0.
++ */
++ filp = NULL;
++ flags |= MAP_SHARED;
++ pgoff = 0; /* has been translated to ptr above */
++#ifdef SHM_COLOUR
++ addr = (uintptr_t) ptr;
++ pgoff = addr >> PAGE_SHIFT;
++#else
++ addr = 0UL;
++#endif
++ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
++}
++
+ #else /* !CONFIG_MMU */
+
+ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -3324,6 +3368,8 @@ static const struct file_operations io_u
+ #ifndef CONFIG_MMU
+ .get_unmapped_area = io_uring_nommu_get_unmapped_area,
+ .mmap_capabilities = io_uring_nommu_mmap_capabilities,
++#else
++ .get_unmapped_area = io_uring_mmu_get_unmapped_area,
+ #endif
+ .poll = io_uring_poll,
+ #ifdef CONFIG_PROC_FS
--- /dev/null
+From 0a6b58c5cd0dfd7961e725212f0fc8dfc5d96195 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 15 Aug 2023 00:31:09 +0200
+Subject: lockdep: fix static memory detection even more
+
+From: Helge Deller <deller@gmx.de>
+
+commit 0a6b58c5cd0dfd7961e725212f0fc8dfc5d96195 upstream.
+
+On the parisc architecture, lockdep reports this warning for all static
+objects which are in the __initdata section (e.g. "setup_done" in
+devtmpfs, "kthreadd_done" in init/main.c):
+
+ INFO: trying to register non-static key.
+
+The warning itself is wrong: those objects are in the __initdata
+section, but on parisc that section lies outside the range from _stext
+to _end, which is why the static_obj() function returns a wrong answer.
+
+While fixing this issue, I noticed that the whole existing check can
+be simplified a lot. Instead of checking against the _stext and _end
+symbols (which include code areas too), just check for the .data and
+.bss segments (since we are checking a data object). This can be done
+with the existing is_kernel_core_data() macro.
+
+In addition, objects in the __initdata section can be checked with
+init_section_contains(), and is_kernel_rodata() allows keys to be in
+the __ro_after_init section.
+
+This partly reverts and simplifies commit bac59d18c701 ("x86/setup: Fix static
+memory detection").
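+
+As an illustration (not from the original commit): the kind of object
+that triggered the bogus warning embeds a lock, and thus a lockdep
+key, in __initdata, e.g.:
+
+	#include <linux/completion.h>
+	#include <linux/init.h>
+
+	/* like "setup_done" in devtmpfs: embeds a spinlock whose
+	 * lockdep key lives in __initdata, which on parisc lies
+	 * outside the [_stext, _end] range */
+	static __initdata DECLARE_COMPLETION(setup_done);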
+
+Link: https://lkml.kernel.org/r/ZNqrLRaOi/3wPAdp@p100
+Fixes: bac59d18c701 ("x86/setup: Fix static memory detection")
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: "Rafael J. Wysocki" <rafael@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/sections.h | 18 ------------------
+ kernel/locking/lockdep.c | 36 ++++++++++++++----------------------
+ 2 files changed, 14 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/include/asm/sections.h
++++ b/arch/x86/include/asm/sections.h
+@@ -2,8 +2,6 @@
+ #ifndef _ASM_X86_SECTIONS_H
+ #define _ASM_X86_SECTIONS_H
+
+-#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+-
+ #include <asm-generic/sections.h>
+ #include <asm/extable.h>
+
+@@ -18,20 +16,4 @@ extern char __end_of_kernel_reserve[];
+
+ extern unsigned long _brk_start, _brk_end;
+
+-static inline bool arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+- /*
+- * If _brk_start has not been cleared, brk allocation is incomplete,
+- * and we can not make assumptions about its use.
+- */
+- if (_brk_start)
+- return 0;
+-
+- /*
+- * After brk allocation is complete, space between _brk_end and _end
+- * is available for allocation.
+- */
+- return addr >= _brk_end && addr < (unsigned long)&_end;
+-}
+-
+ #endif /* _ASM_X86_SECTIONS_H */
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -817,34 +817,26 @@ static int very_verbose(struct lock_clas
+ * Is this the address of a static object:
+ */
+ #ifdef __KERNEL__
+-/*
+- * Check if an address is part of freed initmem. After initmem is freed,
+- * memory can be allocated from it, and such allocations would then have
+- * addresses within the range [_stext, _end].
+- */
+-#ifndef arch_is_kernel_initmem_freed
+-static int arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+- if (system_state < SYSTEM_FREEING_INITMEM)
+- return 0;
+-
+- return init_section_contains((void *)addr, 1);
+-}
+-#endif
+-
+ static int static_obj(const void *obj)
+ {
+- unsigned long start = (unsigned long) &_stext,
+- end = (unsigned long) &_end,
+- addr = (unsigned long) obj;
++ unsigned long addr = (unsigned long) obj;
+
+- if (arch_is_kernel_initmem_freed(addr))
+- return 0;
++ if (is_kernel_core_data(addr))
++ return 1;
++
++ /*
++ * keys are allowed in the __ro_after_init section.
++ */
++ if (is_kernel_rodata(addr))
++ return 1;
+
+ /*
+- * static variable?
++ * in initdata section and used during bootup only?
++ * NOTE: On some platforms the initdata section is
++ * outside of the _stext ... _end range.
+ */
+- if ((addr >= start) && (addr < end))
++ if (system_state < SYSTEM_FREEING_INITMEM &&
++ init_section_contains((void *)addr, 1))
+ return 1;
+
+ /*
--- /dev/null
+From 567b35159e76997e95b643b9a8a5d9d2198f2522 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave@parisc-linux.org>
+Date: Sun, 26 Feb 2023 18:03:33 +0000
+Subject: parisc: Cleanup mmap implementation regarding color alignment
+
+From: John David Anglin <dave@parisc-linux.org>
+
+commit 567b35159e76997e95b643b9a8a5d9d2198f2522 upstream.
+
+This change simplifies the randomization of file mapping regions and
+reworks the code to remove duplication. The flow is now similar to
+that of mips. Finally, we consistently use the do_color_align variable
+to determine when color alignment is needed.
+
+Tested on rp3440.
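+
+For illustration (not part of the commit), the colour alignment
+arithmetic which the reworked code keeps can be followed numerically,
+assuming SHM_COLOUR is 0x00400000 (4 MB) as on parisc:
+
+	/* COLOR_ALIGN(addr, filp_pgoff, pgoff), worked example:
+	 * addr   = 0x12345000
+	 * base   = (addr + 0x3fffff) & ~0x3fffff    = 0x12400000
+	 * off    = ((filp_pgoff + pgoff) << PAGE_SHIFT)
+	 *          & 0x3fffff, e.g.                 = 0x00023000
+	 * result = base + off                       = 0x12423000
+	 * => all shared mappings of the file get the same low
+	 *    22 bits, i.e. the same cache colour.
+	 */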
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/sys_parisc.c | 166 +++++++++++++++-------------------------
+ 1 file changed, 63 insertions(+), 103 deletions(-)
+
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -25,31 +25,26 @@
+ #include <linux/random.h>
+ #include <linux/compat.h>
+
+-/* we construct an artificial offset for the mapping based on the physical
+- * address of the kernel mapping variable */
+-#define GET_LAST_MMAP(filp) \
+- (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+-#define SET_LAST_MMAP(filp, val) \
+- { /* nothing */ }
+-
+-static int get_offset(unsigned int last_mmap)
+-{
+- return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
+-}
++/*
++ * Construct an artificial page offset for the mapping based on the physical
++ * address of the kernel file mapping variable.
++ */
++#define GET_FILP_PGOFF(filp) \
++ (filp ? (((unsigned long) filp->f_mapping) >> 8) \
++ & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
+
+-static unsigned long shared_align_offset(unsigned int last_mmap,
++static unsigned long shared_align_offset(unsigned long filp_pgoff,
+ unsigned long pgoff)
+ {
+- return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
++ return (filp_pgoff + pgoff) << PAGE_SHIFT;
+ }
+
+ static inline unsigned long COLOR_ALIGN(unsigned long addr,
+- unsigned int last_mmap, unsigned long pgoff)
++ unsigned long filp_pgoff, unsigned long pgoff)
+ {
+ unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+ unsigned long off = (SHM_COLOUR-1) &
+- (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
+-
++ shared_align_offset(filp_pgoff, pgoff);
+ return base + off;
+ }
+
+@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(st
+ return PAGE_ALIGN(STACK_TOP - stack_base);
+ }
+
++enum mmap_allocation_direction {UP, DOWN};
+
+-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+- unsigned long len, unsigned long pgoff, unsigned long flags)
++static unsigned long arch_get_unmapped_area_common(struct file *filp,
++ unsigned long addr, unsigned long len, unsigned long pgoff,
++ unsigned long flags, enum mmap_allocation_direction dir)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev;
+- unsigned long task_size = TASK_SIZE;
+- int do_color_align, last_mmap;
++ unsigned long filp_pgoff;
++ int do_color_align;
+ struct vm_unmapped_area_info info;
+
+- if (len > task_size)
++ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+- last_mmap = GET_LAST_MMAP(filp);
++ filp_pgoff = GET_FILP_PGOFF(filp);
+
+ if (flags & MAP_FIXED) {
+- if ((flags & MAP_SHARED) && last_mmap &&
+- (addr - shared_align_offset(last_mmap, pgoff))
++ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
++ if (TASK_SIZE - len < addr)
++ return -EINVAL;
++
++ if ((flags & MAP_SHARED) && filp &&
++ (addr - shared_align_offset(filp_pgoff, pgoff))
+ & (SHM_COLOUR - 1))
+ return -EINVAL;
+- goto found_addr;
++ return addr;
+ }
+
+ if (addr) {
+- if (do_color_align && last_mmap)
+- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
++ if (do_color_align)
++ addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
+- if (task_size - len >= addr &&
++ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
+- goto found_addr;
++ return addr;
+ }
+
+- info.flags = 0;
+ info.length = len;
+- info.low_limit = mm->mmap_legacy_base;
+- info.high_limit = mmap_upper_limit(NULL);
+- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+- info.align_offset = shared_align_offset(last_mmap, pgoff);
+- addr = vm_unmapped_area(&info);
++ info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
++ info.align_offset = shared_align_offset(filp_pgoff, pgoff);
+
+-found_addr:
+- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
++ if (dir == DOWN) {
++ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++ info.low_limit = PAGE_SIZE;
++ info.high_limit = mm->mmap_base;
++ addr = vm_unmapped_area(&info);
++ if (!(addr & ~PAGE_MASK))
++ return addr;
++ VM_BUG_ON(addr != -ENOMEM);
++
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++ }
+
+- return addr;
++ info.flags = 0;
++ info.low_limit = mm->mmap_legacy_base;
++ info.high_limit = mmap_upper_limit(NULL);
++ return vm_unmapped_area(&info);
+ }
+
+-unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+- struct vm_area_struct *vma, *prev;
+- struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
+- int do_color_align, last_mmap;
+- struct vm_unmapped_area_info info;
+-
+- /* requested length too big for entire address space */
+- if (len > TASK_SIZE)
+- return -ENOMEM;
+-
+- do_color_align = 0;
+- if (filp || (flags & MAP_SHARED))
+- do_color_align = 1;
+- last_mmap = GET_LAST_MMAP(filp);
+-
+- if (flags & MAP_FIXED) {
+- if ((flags & MAP_SHARED) && last_mmap &&
+- (addr - shared_align_offset(last_mmap, pgoff))
+- & (SHM_COLOUR - 1))
+- return -EINVAL;
+- goto found_addr;
+- }
+-
+- /* requesting a specific address */
+- if (addr) {
+- if (do_color_align && last_mmap)
+- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+- else
+- addr = PAGE_ALIGN(addr);
+-
+- vma = find_vma_prev(mm, addr, &prev);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vm_start_gap(vma)) &&
+- (!prev || addr >= vm_end_gap(prev)))
+- goto found_addr;
+- }
+-
+- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+- info.length = len;
+- info.low_limit = PAGE_SIZE;
+- info.high_limit = mm->mmap_base;
+- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+- info.align_offset = shared_align_offset(last_mmap, pgoff);
+- addr = vm_unmapped_area(&info);
+- if (!(addr & ~PAGE_MASK))
+- goto found_addr;
+- VM_BUG_ON(addr != -ENOMEM);
+-
+- /*
+- * A failed mmap() very likely causes application failure,
+- * so fall back to the bottom-up function here. This scenario
+- * can happen with large stack limits and large mmap()
+- * allocations.
+- */
+- return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+-
+-found_addr:
+- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
++ return arch_get_unmapped_area_common(filp,
++ addr, len, pgoff, flags, UP);
++}
+
+- return addr;
++unsigned long arch_get_unmapped_area_topdown(struct file *filp,
++ unsigned long addr, unsigned long len, unsigned long pgoff,
++ unsigned long flags)
++{
++ return arch_get_unmapped_area_common(filp,
++ addr, len, pgoff, flags, DOWN);
+ }
+
+ static int mmap_is_legacy(void)
--- /dev/null
+From b5d89408b9fb21258f7c371d6d48a674f60f7181 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 30 Jun 2023 12:36:09 +0200
+Subject: parisc: sys_parisc: parisc_personality() is called from asm code
+
+From: Helge Deller <deller@gmx.de>
+
+commit b5d89408b9fb21258f7c371d6d48a674f60f7181 upstream.
+
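+An illustrative note (not from the original commit): functions entered
+from assembly, like the syscall path here, should carry the asmlinkage
+annotation so that their linkage and calling convention match what the
+asm caller expects, i.e.:
+
+	/* sketch: prototype as seen by the assembly syscall table */
+	asmlinkage long parisc_personality(unsigned long personality);
+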
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/sys_parisc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -24,6 +24,7 @@
+ #include <linux/personality.h>
+ #include <linux/random.h>
+ #include <linux/compat.h>
++#include <linux/elf-randomize.h>
+
+ /*
+ * Construct an artificial page offset for the mapping based on the physical
+@@ -339,7 +340,7 @@ asmlinkage long parisc_fallocate(int fd,
+ ((u64)lenhi << 32) | lenlo);
+ }
+
+-long parisc_personality(unsigned long personality)
++asmlinkage long parisc_personality(unsigned long personality)
+ {
+ long err;
+
arm64-module-plts-inline-linux-moduleloader.h.patch
arm64-module-use-module_init_layout_section-to-spot-init-sections.patch
arm-module-use-module_init_layout_section-to-spot-init-sections.patch
+lockdep-fix-static-memory-detection-even-more.patch
+parisc-cleanup-mmap-implementation-regarding-color-alignment.patch
+parisc-sys_parisc-parisc_personality-is-called-from-asm-code.patch
+io_uring-parisc-adjust-pgoff-in-io_uring-mmap-for-parisc.patch