--- /dev/null
+From 2baafef8bb771899a1c7218b96853c0889a35bdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Feb 2024 08:03:24 +0100
+Subject: ARM: 9352/1: iwmmxt: Remove support for PJ4/PJ4B cores
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit b9920fdd5a751df129808e7fa512e9928223ee05 ]
+
+PJ4 is a v7 core that incorporates an iWMMXt coprocessor. However, GCC
+does not support this combination (its iWMMXt configuration always
+implies v5te), and so there is no v6/v7 user space that actually makes
+use of this, beyond generic support for things like setjmp() that
+preserve/restore the iWMMXt register file using generic LDC/STC
+instructions emitted in assembler. As [0] appears to imply, this logic
+is triggered for the init process at boot, and so most user threads will
+have an iWMMXt register context associated with it, even though it is
+never used.
+
+At this point, it is highly unlikely that such GCC support will ever
+materialize (and Clang does not implement support for iWMMXt to begin
+with).
+
+This means that advertising iWMMXt support on these cores results in
+context switch overhead without any associated benefit, and so it is
+better to simply ignore the iWMMXt unit on these systems. So rip out the
+support. Doing so also fixes the issue reported in [0] related to UNDEF
+handling of co-processor #0/#1 instructions issued from user space
+running in Thumb2 mode.
+
+The PJ4 cores are used in four platforms: Armada 370/xp, Dove (Cubox,
+d2plug), MMP2 (xo-1.75) and Berlin (Google TV). Out of these, only the
+first is still widely used, but that one actually doesn't have iWMMXt
+but instead has only VFPV3-D16, and so it is not impacted by this
+change.
+
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218427 [0]
+
+Fixes: 8bcba70cb5c22 ("ARM: entry: Disregard Thumb undef exception ...")
+Acked-by: Linus Walleij <linus.walleij@linaro.org>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Nicolas Pitre <nico@fluxnic.net>
+Reviewed-by: Jisheng Zhang <jszhang@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/Kconfig | 4 +-
+ arch/arm/kernel/Makefile | 2 -
+ arch/arm/kernel/iwmmxt.S | 51 ++++----------
+ arch/arm/kernel/pj4-cp0.c | 135 --------------------------------------
+ 4 files changed, 15 insertions(+), 177 deletions(-)
+ delete mode 100644 arch/arm/kernel/pj4-cp0.c
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 8f47d6762ea4b..f53832383a635 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -591,8 +591,8 @@ source "arch/arm/mm/Kconfig"
+
+ config IWMMXT
+ bool "Enable iWMMXt support"
+- depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
+- default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
++ depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
++ default y if PXA27x || PXA3xx || ARCH_MMP
+ help
+ Enable support for iWMMXt context switching at run time if
+ running on a CPU that supports it.
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 771264d4726a7..ae2f2b2b4e5ab 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -75,8 +75,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
+ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
+ obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
+-obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
+-obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
+ obj-$(CONFIG_IWMMXT) += iwmmxt.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
+ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
+diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
+index a0218c4867b9b..4a335d3c59690 100644
+--- a/arch/arm/kernel/iwmmxt.S
++++ b/arch/arm/kernel/iwmmxt.S
+@@ -18,18 +18,6 @@
+ #include <asm/assembler.h>
+ #include "iwmmxt.h"
+
+-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+-#define PJ4(code...) code
+-#define XSC(code...)
+-#elif defined(CONFIG_CPU_MOHAWK) || \
+- defined(CONFIG_CPU_XSC3) || \
+- defined(CONFIG_CPU_XSCALE)
+-#define PJ4(code...)
+-#define XSC(code...) code
+-#else
+-#error "Unsupported iWMMXt architecture"
+-#endif
+-
+ #define MMX_WR0 (0x00)
+ #define MMX_WR1 (0x08)
+ #define MMX_WR2 (0x10)
+@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
+ ENTRY(iwmmxt_task_enable)
+ inc_preempt_count r10, r3
+
+- XSC(mrc p15, 0, r2, c15, c1, 0)
+- PJ4(mrc p15, 0, r2, c1, c0, 2)
++ mrc p15, 0, r2, c15, c1, 0
+ @ CP0 and CP1 accessible?
+- XSC(tst r2, #0x3)
+- PJ4(tst r2, #0xf)
++ tst r2, #0x3
+ bne 4f @ if so no business here
+ @ enable access to CP0 and CP1
+- XSC(orr r2, r2, #0x3)
+- XSC(mcr p15, 0, r2, c15, c1, 0)
+- PJ4(orr r2, r2, #0xf)
+- PJ4(mcr p15, 0, r2, c1, c0, 2)
++ orr r2, r2, #0x3
++ mcr p15, 0, r2, c15, c1, 0
+
+ ldr r3, =concan_owner
+ ldr r2, [r0, #S_PC] @ current task pc value
+@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
+ bne 1f @ no: quit
+
+ @ enable access to CP0 and CP1
+- XSC(mrc p15, 0, r4, c15, c1, 0)
+- XSC(orr r4, r4, #0x3)
+- XSC(mcr p15, 0, r4, c15, c1, 0)
+- PJ4(mrc p15, 0, r4, c1, c0, 2)
+- PJ4(orr r4, r4, #0xf)
+- PJ4(mcr p15, 0, r4, c1, c0, 2)
++ mrc p15, 0, r4, c15, c1, 0
++ orr r4, r4, #0x3
++ mcr p15, 0, r4, c15, c1, 0
+
+ mov r0, #0 @ nothing to load
+ str r0, [r3] @ no more current owner
+@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
+ bl concan_save
+
+ @ disable access to CP0 and CP1
+- XSC(bic r4, r4, #0x3)
+- XSC(mcr p15, 0, r4, c15, c1, 0)
+- PJ4(bic r4, r4, #0xf)
+- PJ4(mcr p15, 0, r4, c1, c0, 2)
++ bic r4, r4, #0x3
++ mcr p15, 0, r4, c15, c1, 0
+
+ mrc p15, 0, r2, c2, c0, 0
+ mov r2, r2 @ cpwait
+@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
+ */
+ ENTRY(iwmmxt_task_switch)
+
+- XSC(mrc p15, 0, r1, c15, c1, 0)
+- PJ4(mrc p15, 0, r1, c1, c0, 2)
++ mrc p15, 0, r1, c15, c1, 0
+ @ CP0 and CP1 accessible?
+- XSC(tst r1, #0x3)
+- PJ4(tst r1, #0xf)
++ tst r1, #0x3
+ bne 1f @ yes: block them for next task
+
+ ldr r2, =concan_owner
+@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
+ retne lr @ no: leave Concan disabled
+
+ 1: @ flip Concan access
+- XSC(eor r1, r1, #0x3)
+- XSC(mcr p15, 0, r1, c15, c1, 0)
+- PJ4(eor r1, r1, #0xf)
+- PJ4(mcr p15, 0, r1, c1, c0, 2)
++ eor r1, r1, #0x3
++ mcr p15, 0, r1, c15, c1, 0
+
+ mrc p15, 0, r1, c2, c0, 0
+ sub pc, lr, r1, lsr #32 @ cpwait and return
+diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
+deleted file mode 100644
+index 4bca8098c4ff5..0000000000000
+--- a/arch/arm/kernel/pj4-cp0.c
++++ /dev/null
+@@ -1,135 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * linux/arch/arm/kernel/pj4-cp0.c
+- *
+- * PJ4 iWMMXt coprocessor context switching and handling
+- *
+- * Copyright (c) 2010 Marvell International Inc.
+- */
+-
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <asm/thread_notify.h>
+-#include <asm/cputype.h>
+-
+-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+-{
+- struct thread_info *thread = t;
+-
+- switch (cmd) {
+- case THREAD_NOTIFY_FLUSH:
+- /*
+- * flush_thread() zeroes thread->fpstate, so no need
+- * to do anything here.
+- *
+- * FALLTHROUGH: Ensure we don't try to overwrite our newly
+- * initialised state information on the first fault.
+- */
+-
+- case THREAD_NOTIFY_EXIT:
+- iwmmxt_task_release(thread);
+- break;
+-
+- case THREAD_NOTIFY_SWITCH:
+- iwmmxt_task_switch(thread);
+- break;
+- }
+-
+- return NOTIFY_DONE;
+-}
+-
+-static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
+- .notifier_call = iwmmxt_do,
+-};
+-
+-
+-static u32 __init pj4_cp_access_read(void)
+-{
+- u32 value;
+-
+- __asm__ __volatile__ (
+- "mrc p15, 0, %0, c1, c0, 2\n\t"
+- : "=r" (value));
+- return value;
+-}
+-
+-static void __init pj4_cp_access_write(u32 value)
+-{
+- u32 temp;
+-
+- __asm__ __volatile__ (
+- "mcr p15, 0, %1, c1, c0, 2\n\t"
+-#ifdef CONFIG_THUMB2_KERNEL
+- "isb\n\t"
+-#else
+- "mrc p15, 0, %0, c1, c0, 2\n\t"
+- "mov %0, %0\n\t"
+- "sub pc, pc, #4\n\t"
+-#endif
+- : "=r" (temp) : "r" (value));
+-}
+-
+-static int __init pj4_get_iwmmxt_version(void)
+-{
+- u32 cp_access, wcid;
+-
+- cp_access = pj4_cp_access_read();
+- pj4_cp_access_write(cp_access | 0xf);
+-
+- /* check if coprocessor 0 and 1 are available */
+- if ((pj4_cp_access_read() & 0xf) != 0xf) {
+- pj4_cp_access_write(cp_access);
+- return -ENODEV;
+- }
+-
+- /* read iWMMXt coprocessor id register p1, c0 */
+- __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
+-
+- pj4_cp_access_write(cp_access);
+-
+- /* iWMMXt v1 */
+- if ((wcid & 0xffffff00) == 0x56051000)
+- return 1;
+- /* iWMMXt v2 */
+- if ((wcid & 0xffffff00) == 0x56052000)
+- return 2;
+-
+- return -EINVAL;
+-}
+-
+-/*
+- * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+- * switch code handle iWMMXt context switching.
+- */
+-static int __init pj4_cp0_init(void)
+-{
+- u32 __maybe_unused cp_access;
+- int vers;
+-
+- if (!cpu_is_pj4())
+- return 0;
+-
+- vers = pj4_get_iwmmxt_version();
+- if (vers < 0)
+- return 0;
+-
+-#ifndef CONFIG_IWMMXT
+- pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
+-#else
+- cp_access = pj4_cp_access_read() & ~0xf;
+- pj4_cp_access_write(cp_access);
+-
+- pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
+- elf_hwcap |= HWCAP_IWMMXT;
+- thread_register_notifier(&iwmmxt_notifier_block);
+- register_iwmmxt_undef_handler();
+-#endif
+-
+- return 0;
+-}
+-
+-late_initcall(pj4_cp0_init);
+--
+2.43.0
+
--- /dev/null
+From 89cc7d7c266a2354f80a10bf73e5b27725470bcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Mar 2024 13:05:09 +0100
+Subject: ARM: 9359/1: flush: check if the folio is reserved for no-mapping
+ addresses
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yongqiang Liu <liuyongqiang13@huawei.com>
+
+[ Upstream commit 0c66c6f4e21cb22220cbd8821c5c73fc157d20dc ]
+
+Since commit a4d5613c4dc6 ("arm: extend pfn_valid to take into account
+freed memory map alignment") changes the semantics of pfn_valid() to check
+presence of the memory map for a PFN. With a valid page for an address which
+is reserved but not mapped by the kernel[1], the system crashed during
+some uio test with the following memory layout:
+
+ node 0: [mem 0x00000000c0a00000-0x00000000cc8fffff]
+ node 0: [mem 0x00000000d0000000-0x00000000da1fffff]
+ the uio layout is:0xc0900000, 0x100000
+
+the crash backtrace like:
+
+ Unable to handle kernel paging request at virtual address bff00000
+ [...]
+ CPU: 1 PID: 465 Comm: startapp.bin Tainted: G O 5.10.0 #1
+ Hardware name: Generic DT based system
+ PC is at b15_flush_kern_dcache_area+0x24/0x3c
+ LR is at __sync_icache_dcache+0x6c/0x98
+ [...]
+ (b15_flush_kern_dcache_area) from (__sync_icache_dcache+0x6c/0x98)
+ (__sync_icache_dcache) from (set_pte_at+0x28/0x54)
+ (set_pte_at) from (remap_pfn_range+0x1a0/0x274)
+ (remap_pfn_range) from (uio_mmap+0x184/0x1b8 [uio])
+ (uio_mmap [uio]) from (__mmap_region+0x264/0x5f4)
+ (__mmap_region) from (__do_mmap_mm+0x3ec/0x440)
+ (__do_mmap_mm) from (do_mmap+0x50/0x58)
+ (do_mmap) from (vm_mmap_pgoff+0xfc/0x188)
+ (vm_mmap_pgoff) from (ksys_mmap_pgoff+0xac/0xc4)
+ (ksys_mmap_pgoff) from (ret_fast_syscall+0x0/0x5c)
+ Code: e0801001 e2423001 e1c00003 f57ff04f (ee070f3e)
+ ---[ end trace 09cf0734c3805d52 ]---
+ Kernel panic - not syncing: Fatal exception
+
+So check if PG_reserved was set to solve this issue.
+
+[1]: https://lore.kernel.org/lkml/Zbtdue57RO0QScJM@linux.ibm.com/
+
+Fixes: a4d5613c4dc6 ("arm: extend pfn_valid to take into account freed memory map alignment")
+Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mm/flush.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index d19d140a10c7d..0749cf8a66371 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
+ return;
+
+ folio = page_folio(pfn_to_page(pfn));
++ if (folio_test_reserved(folio))
++ return;
++
+ if (cache_is_vipt_aliasing())
+ mapping = folio_flush_mapping(folio);
+ else
+--
+2.43.0
+
--- /dev/null
+From 907679247799b8003eb772b3ef2b74102523db63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 18 Feb 2024 18:41:37 +0100
+Subject: clocksource/drivers/arm_global_timer: Fix maximum prescaler value
+
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+[ Upstream commit b34b9547cee41575a4fddf390f615570759dc999 ]
+
+The prescaler in the "Global Timer Control Register bit assignments" is
+documented to use bits [15:8], which means that the maximum prescaler
+register value is 0xff.
+
+Fixes: 171b45a4a70e ("clocksource/drivers/arm_global_timer: Implement rate compensation whenever source clock changes")
+Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Link: https://lore.kernel.org/r/20240218174138.1942418-2-martin.blumenstingl@googlemail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clocksource/arm_global_timer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
+index 44a61dc6f9320..e1c773bb55359 100644
+--- a/drivers/clocksource/arm_global_timer.c
++++ b/drivers/clocksource/arm_global_timer.c
+@@ -32,7 +32,7 @@
+ #define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
+ #define GT_CONTROL_AUTO_INC BIT(3) /* banked */
+ #define GT_CONTROL_PRESCALER_SHIFT 8
+-#define GT_CONTROL_PRESCALER_MAX 0xF
++#define GT_CONTROL_PRESCALER_MAX 0xFF
+ #define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
+ GT_CONTROL_PRESCALER_SHIFT)
+
+--
+2.43.0
+
--- /dev/null
+From b48ffbe4539fdf172458719f7579f1c4d9ed9eac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 23 Mar 2024 06:33:33 +0000
+Subject: efi: fix panic in kdump kernel
+
+From: Oleksandr Tymoshenko <ovt@google.com>
+
+[ Upstream commit 62b71cd73d41ddac6b1760402bbe8c4932e23531 ]
+
+Check if get_next_variable() is actually valid pointer before
+calling it. In kdump kernel this method is set to NULL that causes
+panic during the kexec-ed kernel boot.
+
+Tested with QEMU and OVMF firmware.
+
+Fixes: bad267f9e18f ("efi: verify that variable services are supported")
+Signed-off-by: Oleksandr Tymoshenko <ovt@google.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/efi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 9d3910d1abe19..abdfcb5aa470c 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -199,6 +199,8 @@ static bool generic_ops_supported(void)
+
+ name_size = sizeof(name);
+
++ if (!efi.get_next_variable)
++ return false;
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+--
+2.43.0
+
--- /dev/null
+From 965f35539e65f4ce4f05fb206d97b94fc454426f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 10:47:02 +0000
+Subject: efi/libstub: fix efi_random_alloc() to allocate memory at alloc_min
+ or higher address
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: KONDO KAZUMA(近藤 和真) <kazuma-kondo@nec.com>
+
+[ Upstream commit 3cb4a4827596abc82e55b80364f509d0fefc3051 ]
+
+Following warning is sometimes observed while booting my servers:
+ [ 3.594838] DMA: preallocated 4096 KiB GFP_KERNEL pool for atomic allocations
+ [ 3.602918] swapper/0: page allocation failure: order:10, mode:0xcc1(GFP_KERNEL|GFP_DMA), nodemask=(null),cpuset=/,mems_allowed=0-1
+ ...
+ [ 3.851862] DMA: preallocated 1024 KiB GFP_KERNEL|GFP_DMA pool for atomic allocation
+
+If 'nokaslr' boot option is set, the warning always happens.
+
+On x86, ZONE_DMA is small zone at the first 16MB of physical address
+space. When this problem happens, most of that space seems to be used by
+decompressed kernel. Thereby, there is not enough space at DMA_ZONE to
+meet the request of DMA pool allocation.
+
+The commit 2f77465b05b1 ("x86/efistub: Avoid placing the kernel below
+LOAD_PHYSICAL_ADDR") tried to fix this problem by introducing lower
+bound of allocation.
+
+But the fix is not complete.
+
+efi_random_alloc() allocates pages by following steps.
+1. Count total available slots ('total_slots')
+2. Select a slot ('target_slot') to allocate randomly
+3. Calculate a starting address ('target') to be included target_slot
+4. Allocate pages, which starting address is 'target'
+
+In step 1, 'alloc_min' is used to offset the starting address of memory
+chunk. But in step 3 'alloc_min' is not considered at all. As the
+result, 'target' can be miscalculated and become lower than 'alloc_min'.
+
+When KASLR is disabled, 'target_slot' is always 0 and the problem
+happens every time if the EFI memory map of the system meets the
+condition.
+
+Fix this problem by calculating 'target' considering 'alloc_min'.
+
+Cc: linux-efi@vger.kernel.org
+Cc: Tom Englund <tomenglund26@gmail.com>
+Cc: linux-kernel@vger.kernel.org
+Fixes: 2f77465b05b1 ("x86/efistub: Avoid placing the kernel below LOAD_PHYSICAL_ADDR")
+Signed-off-by: Kazuma Kondo <kazuma-kondo@nec.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/libstub/randomalloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index 4e96a855fdf47..7e18528595502 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
+ continue;
+ }
+
+- target = round_up(md->phys_addr, align) + target_slot * align;
++ target = round_up(max(md->phys_addr, alloc_min), align) + target_slot * align;
+ pages = size / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+--
+2.43.0
+
--- /dev/null
+From 5fb6c3cd4185c13278436b6a5fdf58e0ff5c205b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Mar 2024 21:17:04 +0000
+Subject: entry: Respect changes to system call number by trace_sys_enter()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Rösti <an.roesti@gmail.com>
+
+[ Upstream commit fb13b11d53875e28e7fbf0c26b288e4ea676aa9f ]
+
+When a probe is registered at the trace_sys_enter() tracepoint, and that
+probe changes the system call number, the old system call still gets
+executed. This worked correctly until commit b6ec41346103 ("core/entry:
+Report syscall correctly for trace and audit"), which removed the
+re-evaluation of the syscall number after the trace point.
+
+Restore the original semantics by re-evaluating the system call number
+after trace_sys_enter().
+
+The performance impact of this re-evaluation is minimal because it only
+takes place when a trace point is active, and compared to the actual trace
+point overhead the read from a cache hot variable is negligible.
+
+Fixes: b6ec41346103 ("core/entry: Report syscall correctly for trace and audit")
+Signed-off-by: André Rösti <an.roesti@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240311211704.7262-1-an.roesti@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/entry/common.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index d7ee4bc3f2ba3..5ff4f1cd36445 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -77,8 +77,14 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+- if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
++ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
+ trace_sys_enter(regs, syscall);
++ /*
++ * Probes or BPF hooks in the tracepoint may have changed the
++ * system call number as well.
++ */
++ syscall = syscall_get_nr(current, regs);
++ }
+
+ syscall_enter_audit(regs, syscall);
+
+--
+2.43.0
+
--- /dev/null
+From cef566f870592bf69d4caa9521ab8594f66990b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:28 +0000
+Subject: iommu/dma: Force swiotlb_max_mapping_size on an untrusted device
+
+From: Nicolin Chen <nicolinc@nvidia.com>
+
+[ Upstream commit afc5aa46ed560f01ceda897c053c6a40c77ce5c4 ]
+
+The swiotlb does not support a mapping size > swiotlb_max_mapping_size().
+On the other hand, with a 64KB PAGE_SIZE configuration, it's observed that
+an NVME device can map a size between 300KB~512KB, which certainly failed
+the swiotlb mappings, though the default pool of swiotlb has many slots:
+ systemd[1]: Started Journal Service.
+ => nvme 0000:00:01.0: swiotlb buffer is full (sz: 327680 bytes), total 32768 (slots), used 32 (slots)
+ note: journal-offline[392] exited with irqs disabled
+ note: journal-offline[392] exited with preempt_count 1
+
+Call trace:
+[ 3.099918] swiotlb_tbl_map_single+0x214/0x240
+[ 3.099921] iommu_dma_map_page+0x218/0x328
+[ 3.099928] dma_map_page_attrs+0x2e8/0x3a0
+[ 3.101985] nvme_prep_rq.part.0+0x408/0x878 [nvme]
+[ 3.102308] nvme_queue_rqs+0xc0/0x300 [nvme]
+[ 3.102313] blk_mq_flush_plug_list.part.0+0x57c/0x600
+[ 3.102321] blk_add_rq_to_plug+0x180/0x2a0
+[ 3.102323] blk_mq_submit_bio+0x4c8/0x6b8
+[ 3.103463] __submit_bio+0x44/0x220
+[ 3.103468] submit_bio_noacct_nocheck+0x2b8/0x360
+[ 3.103470] submit_bio_noacct+0x180/0x6c8
+[ 3.103471] submit_bio+0x34/0x130
+[ 3.103473] ext4_bio_write_folio+0x5a4/0x8c8
+[ 3.104766] mpage_submit_folio+0xa0/0x100
+[ 3.104769] mpage_map_and_submit_buffers+0x1a4/0x400
+[ 3.104771] ext4_do_writepages+0x6a0/0xd78
+[ 3.105615] ext4_writepages+0x80/0x118
+[ 3.105616] do_writepages+0x90/0x1e8
+[ 3.105619] filemap_fdatawrite_wbc+0x94/0xe0
+[ 3.105622] __filemap_fdatawrite_range+0x68/0xb8
+[ 3.106656] file_write_and_wait_range+0x84/0x120
+[ 3.106658] ext4_sync_file+0x7c/0x4c0
+[ 3.106660] vfs_fsync_range+0x3c/0xa8
+[ 3.106663] do_fsync+0x44/0xc0
+
+Since untrusted devices might go down the swiotlb pathway with dma-iommu,
+these devices should not map a size larger than swiotlb_max_mapping_size.
+
+To fix this bug, add iommu_dma_max_mapping_size() for untrusted devices to
+take into account swiotlb_max_mapping_size() v.s. iova_rcache_range() from
+the iommu_dma_opt_mapping_size().
+
+Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
+Link: https://lore.kernel.org/r/ee51a3a5c32cf885b18f6416171802669f4a718a.1707851466.git.nicolinc@nvidia.com
+Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
+[will: Drop redundant is_swiotlb_active(dev) check]
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/dma-iommu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 037fcf826407f..a0767ce1bd133 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -1706,6 +1706,14 @@ static size_t iommu_dma_opt_mapping_size(void)
+ return iova_rcache_range();
+ }
+
++static size_t iommu_dma_max_mapping_size(struct device *dev)
++{
++ if (dev_is_untrusted(dev))
++ return swiotlb_max_mapping_size(dev);
++
++ return SIZE_MAX;
++}
++
+ static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
+ .alloc = iommu_dma_alloc,
+@@ -1728,6 +1736,7 @@ static const struct dma_map_ops iommu_dma_ops = {
+ .unmap_resource = iommu_dma_unmap_resource,
+ .get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
++ .max_mapping_size = iommu_dma_max_mapping_size,
+ };
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From 93614ae894b84eb51bcbce76e7fe9af424252d05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 13:18:17 +0200
+Subject: irqchip/renesas-rzg2l: Add macro to retrieve TITSR register offset
+ based on register's index
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+[ Upstream commit 2eca4731cc66563b3919d8753dbd74d18c39f662 ]
+
+There are 2 TITSR registers available on the IA55 interrupt controller.
+
+Add a macro that retrieves the TITSR register offset based on its
+index. This macro is useful when adding suspend/resume support so both
+TITSR registers can be accessed in a for loop.
+
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20231120111820.87398-7-claudiu.beznea.uj@bp.renesas.com
+Stable-dep-of: 853a6030303f ("irqchip/renesas-rzg2l: Prevent spurious interrupts when setting trigger type")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index d6514f2d51aff..3f2f4ebfe4da6 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -28,8 +28,7 @@
+ #define ISCR 0x10
+ #define IITSR 0x14
+ #define TSCR 0x20
+-#define TITSR0 0x24
+-#define TITSR1 0x28
++#define TITSR(n) (0x24 + (n) * 4)
+ #define TITSR0_MAX_INT 16
+ #define TITSEL_WIDTH 0x2
+ #define TSSR(n) (0x30 + ((n) * 4))
+@@ -206,8 +205,7 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 titseln = hwirq - IRQC_TINT_START;
+- u32 offset;
+- u8 sense;
++ u8 index, sense;
+ u32 reg;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+@@ -223,17 +221,17 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ return -EINVAL;
+ }
+
+- offset = TITSR0;
++ index = 0;
+ if (titseln >= TITSR0_MAX_INT) {
+ titseln -= TITSR0_MAX_INT;
+- offset = TITSR1;
++ index = 1;
+ }
+
+ raw_spin_lock(&priv->lock);
+- reg = readl_relaxed(priv->base + offset);
++ reg = readl_relaxed(priv->base + TITSR(index));
+ reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ reg |= sense << (titseln * TITSEL_WIDTH);
+- writel_relaxed(reg, priv->base + offset);
++ writel_relaxed(reg, priv->base + TITSR(index));
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 07afa4639d1422ca5367ff435d5809ecb19c6a3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 18:39:18 +0000
+Subject: irqchip/renesas-rzg2l: Flush posted write in irq_eoi()
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 9eec61df55c51415409c7cc47e9a1c8de94a0522 ]
+
+The irq_eoi() callback of the RZ/G2L interrupt chip clears the relevant
+interrupt cause bit in the TSCR register by writing to it.
+
+This write is not sufficient because the write is posted and therefore not
+guaranteed to immediately clear the bit. Due to that delay the CPU can
+raise the just handled interrupt again.
+
+Prevent this by reading the register back which causes the posted write to
+be flushed to the hardware before the read completes.
+
+Fixes: 3fed09559cd8 ("irqchip: Add RZ/G2L IA55 Interrupt Controller driver")
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 3dc2b3867f219..d6514f2d51aff 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -81,8 +81,14 @@ static void rzg2l_irq_eoi(struct irq_data *d)
+ * ISCR can only be cleared if the type is falling-edge, rising-edge or
+ * falling/rising-edge.
+ */
+- if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
++ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
+ writel_relaxed(iscr & ~bit, priv->base + ISCR);
++ /*
++ * Enforce that the posted write is flushed to prevent that the
++ * just handled interrupt is raised again.
++ */
++ readl_relaxed(priv->base + ISCR);
++ }
+ }
+
+ static void rzg2l_tint_eoi(struct irq_data *d)
+@@ -93,8 +99,14 @@ static void rzg2l_tint_eoi(struct irq_data *d)
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + TSCR);
+- if (reg & bit)
++ if (reg & bit) {
+ writel_relaxed(reg & ~bit, priv->base + TSCR);
++ /*
++ * Enforce that the posted write is flushed to prevent that the
++ * just handled interrupt is raised again.
++ */
++ readl_relaxed(priv->base + TSCR);
++ }
+ }
+
+ static void rzg2l_irqc_eoi(struct irq_data *d)
+--
+2.43.0
+
--- /dev/null
+From 6314bad040980b3141c19192c04e731281ff9a0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 13:18:16 +0200
+Subject: irqchip/renesas-rzg2l: Implement restriction when writing ISCR
+ register
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+[ Upstream commit ef88eefb1a81a8701eabb7d5ced761a66a465a49 ]
+
+The RZ/G2L manual (chapter "IRQ Status Control Register (ISCR)") describes
+the operation to clear interrupts through the ISCR register as follows:
+
+[Write operation]
+
+ When "Falling-edge detection", "Rising-edge detection" or
+ "Falling/Rising-edge detection" is set in IITSR:
+
+ - In case ISTAT is 1
+ 0: IRQn interrupt detection status is cleared.
+ 1: Invalid to write.
+ - In case ISTAT is 0
+ Invalid to write.
+
+ When "Low-level detection" is set in IITSR.:
+ Invalid to write.
+
+Take the interrupt type into account when clearing interrupts through the
+ISCR register to avoid writing the ISCR when the interrupt type is level.
+
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://lore.kernel.org/r/20231120111820.87398-6-claudiu.beznea.uj@bp.renesas.com
+Stable-dep-of: 9eec61df55c5 ("irqchip/renesas-rzg2l: Flush posted write in irq_eoi()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index fe8d516f36149..3dc2b3867f219 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -72,11 +72,17 @@ static void rzg2l_irq_eoi(struct irq_data *d)
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 bit = BIT(hw_irq);
+- u32 reg;
++ u32 iitsr, iscr;
+
+- reg = readl_relaxed(priv->base + ISCR);
+- if (reg & bit)
+- writel_relaxed(reg & ~bit, priv->base + ISCR);
++ iscr = readl_relaxed(priv->base + ISCR);
++ iitsr = readl_relaxed(priv->base + IITSR);
++
++ /*
++ * ISCR can only be cleared if the type is falling-edge, rising-edge or
++ * falling/rising-edge.
++ */
++ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
++ writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ }
+
+ static void rzg2l_tint_eoi(struct irq_data *d)
+--
+2.43.0
+
--- /dev/null
+From 48a6b24db741814e592c96e3a8640bc574324828 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 18:39:21 +0000
+Subject: irqchip/renesas-rzg2l: Prevent spurious interrupts when setting
+ trigger type
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 853a6030303f8a8fa54929b68e5665d9b21aa405 ]
+
+RZ/G2L interrupt chips require that the interrupt is masked before changing
+the NMI, IRQ, TINT interrupt settings. Aside of that, after setting an edge
+trigger type it is required to clear the interrupt status register in order
+to avoid spurious interrupts.
+
+The current implementation fails to do either of that and therefore is
+prone to generate spurious interrupts when setting the trigger type.
+
+Address this by:
+
+ - Ensuring that the interrupt is masked at the chip level across the
+ update for the TINT chip
+
+ - Clearing the interrupt status register after updating the trigger mode
+ for edge type interrupts
+
+[ tglx: Massaged changelog and reverted the spin_lock_irqsave() change as
+ the set_type() callback is always called with interrupts disabled. ]
+
+Fixes: 3fed09559cd8 ("irqchip: Add RZ/G2L IA55 Interrupt Controller driver")
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 36 +++++++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 762bb90b74e61..dc822111fc5d5 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -162,8 +162,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+
+ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++ unsigned int hwirq = irqd_to_hwirq(d);
++ u32 iitseln = hwirq - IRQC_IRQ_START;
++ bool clear_irq_int = false;
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+@@ -173,14 +175,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = IITSR_IITSEL_EDGE_FALLING;
++ clear_irq_int = true;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = IITSR_IITSEL_EDGE_RISING;
++ clear_irq_int = true;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = IITSR_IITSEL_EDGE_BOTH;
++ clear_irq_int = true;
+ break;
+
+ default:
+@@ -189,21 +194,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+
+ raw_spin_lock(&priv->lock);
+ tmp = readl_relaxed(priv->base + IITSR);
+- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
+- tmp |= IITSR_IITSEL(hw_irq, sense);
++ tmp &= ~IITSR_IITSEL_MASK(iitseln);
++ tmp |= IITSR_IITSEL(iitseln, sense);
++ if (clear_irq_int)
++ rzg2l_clear_irq_int(priv, hwirq);
+ writel_relaxed(tmp, priv->base + IITSR);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+ }
+
++static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
++ u32 reg, u32 tssr_offset, u8 tssr_index)
++{
++ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
++ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
++
++ /* Clear the relevant byte in reg */
++ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
++ /* Set TINT and leave TIEN clear */
++ reg |= tint << TSSEL_SHIFT(tssr_offset);
++ writel_relaxed(reg, priv->base + TSSR(tssr_index));
++
++ return reg | tien;
++}
++
+ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 titseln = hwirq - IRQC_TINT_START;
++ u32 tssr_offset = TSSR_OFFSET(titseln);
++ u8 tssr_index = TSSR_INDEX(titseln);
+ u8 index, sense;
+- u32 reg;
++ u32 reg, tssr;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+@@ -225,10 +249,14 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ }
+
+ raw_spin_lock(&priv->lock);
++ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
++ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
+ reg = readl_relaxed(priv->base + TITSR(index));
+ reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ reg |= sense << (titseln * TITSEL_WIDTH);
+ writel_relaxed(reg, priv->base + TITSR(index));
++ rzg2l_clear_tint_int(priv, hwirq);
++ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 9c58ab7c5c26219577200bbe96192d3e03f87483 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 18:39:20 +0000
+Subject: irqchip/renesas-rzg2l: Rename rzg2l_irq_eoi()
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit b4b5cd61a6fdd92ede0dc39f0850a182affd1323 ]
+
+Rename rzg2l_irq_eoi()->rzg2l_clear_irq_int() and simplify the code by
+removing redundant priv local variable.
+
+Suggested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Stable-dep-of: 853a6030303f ("irqchip/renesas-rzg2l: Prevent spurious interrupts when setting trigger type")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 51dda1745ffaa..762bb90b74e61 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -66,10 +66,9 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+ return data->domain->host_data;
+ }
+
+-static void rzg2l_irq_eoi(struct irq_data *d)
++static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
+ u32 bit = BIT(hw_irq);
+ u32 iitsr, iscr;
+
+@@ -113,7 +112,7 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
+
+ raw_spin_lock(&priv->lock);
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+- rzg2l_irq_eoi(d);
++ rzg2l_clear_irq_int(priv, hw_irq);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+ rzg2l_clear_tint_int(priv, hw_irq);
+ raw_spin_unlock(&priv->lock);
+--
+2.43.0
+
--- /dev/null
+From 6427f70f7cbfb6709d29b98188cce1f19def5bbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Mar 2024 18:39:19 +0000
+Subject: irqchip/renesas-rzg2l: Rename rzg2l_tint_eoi()
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 7cb6362c63df233172eaecddaf9ce2ce2f769112 ]
+
+Rename rzg2l_tint_eoi()->rzg2l_clear_tint_int() and simplify the code by
+removing redundant priv and hw_irq local variables.
+
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Stable-dep-of: 853a6030303f ("irqchip/renesas-rzg2l: Prevent spurious interrupts when setting trigger type")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 3f2f4ebfe4da6..51dda1745ffaa 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -90,11 +90,9 @@ static void rzg2l_irq_eoi(struct irq_data *d)
+ }
+ }
+
+-static void rzg2l_tint_eoi(struct irq_data *d)
++static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
+- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+- u32 bit = BIT(hw_irq);
++ u32 bit = BIT(hwirq - IRQC_TINT_START);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + TSCR);
+@@ -117,7 +115,7 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+ rzg2l_irq_eoi(d);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+- rzg2l_tint_eoi(d);
++ rzg2l_clear_tint_int(priv, hw_irq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_eoi_parent(d);
+ }
+--
+2.43.0
+
--- /dev/null
+From fb6a26180339d570d69a3cfebdabb567b6698e0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Mar 2024 00:17:30 +0900
+Subject: kprobes/x86: Use copy_from_kernel_nofault() to read from unsafe
+ address
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+[ Upstream commit 4e51653d5d871f40f1bd5cf95cc7f2d8b33d063b ]
+
+Read from an unsafe address with copy_from_kernel_nofault() in
+arch_adjust_kprobe_addr() because this function is used before checking
+the address is in text or not. Syzcaller bot found a bug and reported
+the case if user specifies inaccessible data area,
+arch_adjust_kprobe_addr() will cause a kernel panic.
+
+[ mingo: Clarified the comment. ]
+
+Fixes: cc66bb914578 ("x86/ibt,kprobes: Cure sym+0 equals fentry woes")
+Reported-by: Qiang Zhang <zzqq0103.hey@gmail.com>
+Tested-by: Jinghao Jia <jinghao7@illinois.edu>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/171042945004.154897.2221804961882915806.stgit@devnote2
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/kprobes/core.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index a0ce46c0a2d88..a6a3475e1d609 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -335,7 +335,16 @@ static int can_probe(unsigned long paddr)
+ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+ bool *on_func_entry)
+ {
+- if (is_endbr(*(u32 *)addr)) {
++ u32 insn;
++
++ /*
++ * Since 'addr' is not guaranteed to be safe to access, use
++ * copy_from_kernel_nofault() to read the instruction:
++ */
++ if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
++ return NULL;
++
++ if (is_endbr(insn)) {
+ *on_func_entry = !offset || offset == 4;
+ if (*on_func_entry)
+ offset = 4;
+--
+2.43.0
+
--- /dev/null
+From 63a3f27abc19e572d5e6bf0ccfd9c834dd337906 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Feb 2024 13:07:24 +0106
+Subject: printk: Update @console_may_schedule in console_trylock_spinning()
+
+From: John Ogness <john.ogness@linutronix.de>
+
+[ Upstream commit 8076972468584d4a21dab9aa50e388b3ea9ad8c7 ]
+
+console_trylock_spinning() may takeover the console lock from a
+schedulable context. Update @console_may_schedule to make sure it
+reflects a trylock acquire.
+
+Reported-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Closes: https://lore.kernel.org/lkml/20240222090538.23017-1-quic_mojha@quicinc.com
+Fixes: dbdda842fe96 ("printk: Add console owner and waiter logic to load balance console writes")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/875xybmo2z.fsf@jogness.linutronix.de
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/printk/printk.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index a11e1b6f29c04..7a835b277e98d 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2026,6 +2026,12 @@ static int console_trylock_spinning(void)
+ */
+ mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+
++ /*
++ * Update @console_may_schedule for trylock because the previous
++ * owner may have been schedulable.
++ */
++ console_may_schedule = 0;
++
+ return 1;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 46caf499df557030e71a319e7421734dec945c36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Mar 2024 09:36:02 +0100
+Subject: pwm: img: fix pwm clock lookup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zoltan HERPAI <wigyori@uid0.hu>
+
+[ Upstream commit 9eb05877dbee03064d3d3483cd6702f610d5a358 ]
+
+22e8e19 has introduced a regression in the imgchip->pwm_clk lookup, whereas
+the clock name has also been renamed to "imgchip". This causes the driver
+failing to load:
+
+[ 0.546905] img-pwm 18101300.pwm: failed to get imgchip clock
+[ 0.553418] img-pwm: probe of 18101300.pwm failed with error -2
+
+Fix this lookup by reverting the clock name back to "pwm".
+
+Signed-off-by: Zoltan HERPAI <wigyori@uid0.hu>
+Link: https://lore.kernel.org/r/20240320083602.81592-1-wigyori@uid0.hu
+Fixes: 22e8e19a46f7 ("pwm: img: Rename variable pointing to driver private data")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pwm/pwm-img.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 116fa060e3029..29dcf38f3b521 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -288,9 +288,9 @@ static int img_pwm_probe(struct platform_device *pdev)
+ return PTR_ERR(imgchip->sys_clk);
+ }
+
+- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
++ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ if (IS_ERR(imgchip->pwm_clk)) {
+- dev_err(&pdev->dev, "failed to get imgchip clock\n");
++ dev_err(&pdev->dev, "failed to get pwm clock\n");
+ return PTR_ERR(imgchip->pwm_clk);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 843071c030ca3a2a84dd09b166983986a33e2e5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Mar 2024 05:34:44 +0300
+Subject: selftests/mm: Fix build with _FORTIFY_SOURCE
+
+From: Vitaly Chikunov <vt@altlinux.org>
+
+[ Upstream commit 8b65ef5ad4862904e476a8f3d4e4418c950ddb90 ]
+
+Add missing flags argument to open(2) call with O_CREAT.
+
+Some tests fail to compile if _FORTIFY_SOURCE is defined (to any valid
+value) (together with -O), resulting in similar error messages such as:
+
+ In file included from /usr/include/fcntl.h:342,
+ from gup_test.c:1:
+ In function 'open',
+ inlined from 'main' at gup_test.c:206:10:
+ /usr/include/bits/fcntl2.h:50:11: error: call to '__open_missing_mode' declared with attribute error: open with O_CREAT or O_TMPFILE in second argument needs 3 arguments
+ 50 | __open_missing_mode ();
+ | ^~~~~~~~~~~~~~~~~~~~~~
+
+_FORTIFY_SOURCE is enabled by default in some distributions, so the
+tests are not built by default and are skipped.
+
+open(2) man-page warns about missing flags argument: "if it is not
+supplied, some arbitrary bytes from the stack will be applied as the
+file mode."
+
+Link: https://lkml.kernel.org/r/20240318023445.3192922-1-vt@altlinux.org
+Fixes: aeb85ed4f41a ("tools/testing/selftests/vm/gup_benchmark.c: allow user specified file")
+Fixes: fbe37501b252 ("mm: huge_memory: debugfs for file-backed THP split")
+Fixes: c942f5bd17b3 ("selftests: soft-dirty: add test for mprotect")
+Signed-off-by: Vitaly Chikunov <vt@altlinux.org>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Keith Busch <kbusch@kernel.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/gup_test.c | 2 +-
+ tools/testing/selftests/mm/soft-dirty.c | 2 +-
+ tools/testing/selftests/mm/split_huge_page_test.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
+index cbe99594d319b..18a49c70d4c63 100644
+--- a/tools/testing/selftests/mm/gup_test.c
++++ b/tools/testing/selftests/mm/gup_test.c
+@@ -203,7 +203,7 @@ int main(int argc, char **argv)
+ ksft_print_header();
+ ksft_set_plan(nthreads);
+
+- filed = open(file, O_RDWR|O_CREAT);
++ filed = open(file, O_RDWR|O_CREAT, 0664);
+ if (filed < 0)
+ ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
+
+diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
+index cc5f144430d4d..7dbfa53d93a05 100644
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
+ if (!map)
+ ksft_exit_fail_msg("anon mmap failed\n");
+ } else {
+- test_fd = open(fname, O_RDWR | O_CREAT);
++ test_fd = open(fname, O_RDWR | O_CREAT, 0664);
+ if (test_fd < 0) {
+ ksft_test_result_skip("Test %s open() file failed\n", __func__);
+ return;
+diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
+index 0e74635c8c3d9..dff3be23488b4 100644
+--- a/tools/testing/selftests/mm/split_huge_page_test.c
++++ b/tools/testing/selftests/mm/split_huge_page_test.c
+@@ -253,7 +253,7 @@ void split_file_backed_thp(void)
+ goto cleanup;
+ }
+
+- fd = open(testfile, O_CREAT|O_WRONLY);
++ fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+ if (fd == -1) {
+ perror("Cannot open testing file\n");
+ goto cleanup;
+--
+2.43.0
+
--- /dev/null
+From fcb5dbdc5b4a2c20673aa6eaaf2a4d68dfed9b9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jan 2024 10:38:06 +0500
+Subject: selftests/mm: gup_test: conform test to TAP format output
+
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+
+[ Upstream commit cb6e7cae18868422a23d62670110c61fd1b15029 ]
+
+Conform the layout, informational and status messages to TAP. No
+functional change is intended other than the layout of output messages.
+
+Link: https://lkml.kernel.org/r/20240102053807.2114200-1-usama.anjum@collabora.com
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 8b65ef5ad486 ("selftests/mm: Fix build with _FORTIFY_SOURCE")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/gup_test.c | 65 ++++++++++++++-------------
+ 1 file changed, 33 insertions(+), 32 deletions(-)
+
+diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
+index ec22291363844..cbe99594d319b 100644
+--- a/tools/testing/selftests/mm/gup_test.c
++++ b/tools/testing/selftests/mm/gup_test.c
+@@ -50,39 +50,41 @@ static char *cmd_to_str(unsigned long cmd)
+ void *gup_thread(void *data)
+ {
+ struct gup_test gup = *(struct gup_test *)data;
+- int i;
++ int i, status;
+
+ /* Only report timing information on the *_BENCHMARK commands: */
+ if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
+ (cmd == PIN_LONGTERM_BENCHMARK)) {
+ for (i = 0; i < repeats; i++) {
+ gup.size = size;
+- if (ioctl(gup_fd, cmd, &gup))
+- perror("ioctl"), exit(1);
++ status = ioctl(gup_fd, cmd, &gup);
++ if (status)
++ break;
+
+ pthread_mutex_lock(&print_mutex);
+- printf("%s: Time: get:%lld put:%lld us",
+- cmd_to_str(cmd), gup.get_delta_usec,
+- gup.put_delta_usec);
++ ksft_print_msg("%s: Time: get:%lld put:%lld us",
++ cmd_to_str(cmd), gup.get_delta_usec,
++ gup.put_delta_usec);
+ if (gup.size != size)
+- printf(", truncated (size: %lld)", gup.size);
+- printf("\n");
++ ksft_print_msg(", truncated (size: %lld)", gup.size);
++ ksft_print_msg("\n");
+ pthread_mutex_unlock(&print_mutex);
+ }
+ } else {
+ gup.size = size;
+- if (ioctl(gup_fd, cmd, &gup)) {
+- perror("ioctl");
+- exit(1);
+- }
++ status = ioctl(gup_fd, cmd, &gup);
++ if (status)
++ goto return_;
+
+ pthread_mutex_lock(&print_mutex);
+- printf("%s: done\n", cmd_to_str(cmd));
++ ksft_print_msg("%s: done\n", cmd_to_str(cmd));
+ if (gup.size != size)
+- printf("Truncated (size: %lld)\n", gup.size);
++ ksft_print_msg("Truncated (size: %lld)\n", gup.size);
+ pthread_mutex_unlock(&print_mutex);
+ }
+
++return_:
++ ksft_test_result(!status, "ioctl status %d\n", status);
+ return NULL;
+ }
+
+@@ -170,7 +172,7 @@ int main(int argc, char **argv)
+ touch = 1;
+ break;
+ default:
+- return -1;
++ ksft_exit_fail_msg("Wrong argument\n");
+ }
+ }
+
+@@ -198,11 +200,12 @@ int main(int argc, char **argv)
+ }
+ }
+
++ ksft_print_header();
++ ksft_set_plan(nthreads);
++
+ filed = open(file, O_RDWR|O_CREAT);
+- if (filed < 0) {
+- perror("open");
+- exit(filed);
+- }
++ if (filed < 0)
++ ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
+
+ gup.nr_pages_per_call = nr_pages;
+ if (write)
+@@ -213,27 +216,24 @@ int main(int argc, char **argv)
+ switch (errno) {
+ case EACCES:
+ if (getuid())
+- printf("Please run this test as root\n");
++ ksft_print_msg("Please run this test as root\n");
+ break;
+ case ENOENT:
+- if (opendir("/sys/kernel/debug") == NULL) {
+- printf("mount debugfs at /sys/kernel/debug\n");
+- break;
+- }
+- printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
++ if (opendir("/sys/kernel/debug") == NULL)
++ ksft_print_msg("mount debugfs at /sys/kernel/debug\n");
++ ksft_print_msg("check if CONFIG_GUP_TEST is enabled in kernel config\n");
+ break;
+ default:
+- perror("failed to open " GUP_TEST_FILE);
++ ksft_print_msg("failed to open %s: %s\n", GUP_TEST_FILE, strerror(errno));
+ break;
+ }
+- exit(KSFT_SKIP);
++ ksft_test_result_skip("Please run this test as root\n");
++ return ksft_exit_pass();
+ }
+
+ p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
+- if (p == MAP_FAILED) {
+- perror("mmap");
+- exit(1);
+- }
++ if (p == MAP_FAILED)
++ ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
+ gup.addr = (unsigned long)p;
+
+ if (thp == 1)
+@@ -264,7 +264,8 @@ int main(int argc, char **argv)
+ ret = pthread_join(tid[i], NULL);
+ assert(ret == 0);
+ }
++
+ free(tid);
+
+- return 0;
++ return ksft_exit_pass();
+ }
+--
+2.43.0
+
fix-memory-leak-in-posix_clock_open.patch
wifi-rtw88-8821cu-fix-connection-failure.patch
btrfs-fix-deadlock-with-fiemap-and-extent-locking.patch
+clocksource-drivers-arm_global_timer-fix-maximum-pre.patch
+arm-9352-1-iwmmxt-remove-support-for-pj4-pj4b-cores.patch
+arm-9359-1-flush-check-if-the-folio-is-reserved-for-.patch
+entry-respect-changes-to-system-call-number-by-trace.patch
+swiotlb-fix-double-allocation-of-slots-due-to-broken.patch
+swiotlb-honour-dma_alloc_coherent-alignment-in-swiot.patch
+swiotlb-fix-alignment-checks-when-both-allocation-an.patch
+iommu-dma-force-swiotlb_max_mapping_size-on-an-untru.patch
+printk-update-console_may_schedule-in-console_tryloc.patch
+irqchip-renesas-rzg2l-implement-restriction-when-wri.patch
+irqchip-renesas-rzg2l-flush-posted-write-in-irq_eoi.patch
+irqchip-renesas-rzg2l-add-macro-to-retrieve-titsr-re.patch
+irqchip-renesas-rzg2l-rename-rzg2l_tint_eoi.patch
+irqchip-renesas-rzg2l-rename-rzg2l_irq_eoi.patch
+irqchip-renesas-rzg2l-prevent-spurious-interrupts-wh.patch
+kprobes-x86-use-copy_from_kernel_nofault-to-read-fro.patch
+efi-libstub-fix-efi_random_alloc-to-allocate-memory-.patch
+x86-mpparse-register-apic-address-only-once.patch
+x86-fpu-keep-xfd_state-in-sync-with-msr_ia32_xfd.patch
+efi-fix-panic-in-kdump-kernel.patch
+pwm-img-fix-pwm-clock-lookup.patch
+selftests-mm-gup_test-conform-test-to-tap-format-out.patch
+selftests-mm-fix-build-with-_fortify_source.patch
--- /dev/null
+From 322d18002ab247affa0edfd08ca6c195856e9979 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:27 +0000
+Subject: swiotlb: Fix alignment checks when both allocation and DMA masks are
+ present
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 51b30ecb73b481d5fac6ccf2ecb4a309c9ee3310 ]
+
+Nicolin reports that swiotlb buffer allocations fail for an NVME device
+behind an IOMMU using 64KiB pages. This is because we end up with a
+minimum allocation alignment of 64KiB (for the IOMMU to map the buffer
+safely) but a minimum DMA alignment mask corresponding to a 4KiB NVME
+page (i.e. preserving the 4KiB page offset from the original allocation).
+If the original address is not 4KiB-aligned, the allocation will fail
+because swiotlb_search_pool_area() erroneously compares these unmasked
+bits with the 64KiB-aligned candidate allocation.
+
+Tweak swiotlb_search_pool_area() so that the DMA alignment mask is
+reduced based on the required alignment of the allocation.
+
+Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
+Link: https://lore.kernel.org/r/cover.1707851466.git.nicolinc@nvidia.com
+Reported-by: Nicolin Chen <nicolinc@nvidia.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 2d347685cf566..9edfb3b7702bb 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -981,8 +981,7 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ dma_addr_t tbl_dma_addr =
+ phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
+ unsigned long max_slots = get_max_slots(boundary_mask);
+- unsigned int iotlb_align_mask =
+- dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
++ unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
+ unsigned int nslots = nr_slots(alloc_size), stride;
+ unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+ unsigned int index, slots_checked, count = 0, i;
+@@ -993,6 +992,14 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ BUG_ON(!nslots);
+ BUG_ON(area_index >= pool->nareas);
+
++ /*
++ * Ensure that the allocation is at least slot-aligned and update
++ * 'iotlb_align_mask' to ignore bits that will be preserved when
++ * offsetting into the allocation.
++ */
++ alloc_align_mask |= (IO_TLB_SIZE - 1);
++ iotlb_align_mask &= ~alloc_align_mask;
++
+ /*
+ * For mappings with an alignment requirement don't bother looping to
+ * unaligned slots once we found an aligned one.
+--
+2.43.0
+
--- /dev/null
+From 885decec9237c9cc85a337605e478efbb54f5cc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:24 +0000
+Subject: swiotlb: Fix double-allocation of slots due to broken alignment
+ handling
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 04867a7a33324c9c562ee7949dbcaab7aaad1fb4 ]
+
+Commit bbb73a103fbb ("swiotlb: fix a braino in the alignment check fix"),
+which was a fix for commit 0eee5ae10256 ("swiotlb: fix slot alignment
+checks"), causes a functional regression with vsock in a virtual machine
+using bouncing via a restricted DMA SWIOTLB pool.
+
+When virtio allocates the virtqueues for the vsock device using
+dma_alloc_coherent(), the SWIOTLB search can return page-unaligned
+allocations if 'area->index' was left unaligned by a previous allocation
+from the buffer:
+
+ # Final address in brackets is the SWIOTLB address returned to the caller
+ | virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1645-1649/7168 (0x98326800)
+ | virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1649-1653/7168 (0x98328800)
+ | virtio-pci 0000:00:07.0: orig_addr 0x0 alloc_size 0x2000, iotlb_align_mask 0x800 stride 0x2: got slot 1653-1657/7168 (0x9832a800)
+
+This ends badly (typically buffer corruption and/or a hang) because
+swiotlb_alloc() is expecting a page-aligned allocation and so blindly
+returns a pointer to the 'struct page' corresponding to the allocation,
+therefore double-allocating the first half (2KiB slot) of the 4KiB page.
+
+Fix the problem by treating the allocation alignment separately to any
+additional alignment requirements from the device, using the maximum
+of the two as the stride to search the buffer slots and taking care
+to ensure a minimum of page-alignment for buffers larger than a page.
+
+This also resolves swiotlb allocation failures occuring due to the
+inclusion of ~PAGE_MASK in 'iotlb_align_mask' for large allocations and
+resulting in alignment requirements exceeding swiotlb_max_mapping_size().
+
+Fixes: bbb73a103fbb ("swiotlb: fix a braino in the alignment check fix")
+Fixes: 0eee5ae10256 ("swiotlb: fix slot alignment checks")
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Petr Tesarik <petr.tesarik1@huawei-partners.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 33d942615be54..8fba61069b84d 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -982,7 +982,7 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
+ unsigned long max_slots = get_max_slots(boundary_mask);
+ unsigned int iotlb_align_mask =
+- dma_get_min_align_mask(dev) | alloc_align_mask;
++ dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
+ unsigned int nslots = nr_slots(alloc_size), stride;
+ unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+ unsigned int index, slots_checked, count = 0, i;
+@@ -994,18 +994,17 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ BUG_ON(area_index >= pool->nareas);
+
+ /*
+- * For allocations of PAGE_SIZE or larger only look for page aligned
+- * allocations.
++ * For mappings with an alignment requirement don't bother looping to
++ * unaligned slots once we found an aligned one.
+ */
+- if (alloc_size >= PAGE_SIZE)
+- iotlb_align_mask |= ~PAGE_MASK;
+- iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
++ stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
+
+ /*
+- * For mappings with an alignment requirement don't bother looping to
+- * unaligned slots once we found an aligned one.
++ * For allocations of PAGE_SIZE or larger only look for page aligned
++ * allocations.
+ */
+- stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
++ if (alloc_size >= PAGE_SIZE)
++ stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
+
+ spin_lock_irqsave(&area->lock, flags);
+ if (unlikely(nslots > pool->area_nslabs - area->used))
+@@ -1015,11 +1014,14 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ index = area->index;
+
+ for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
++ phys_addr_t tlb_addr;
++
+ slot_index = slot_base + index;
++ tlb_addr = slot_addr(tbl_dma_addr, slot_index);
+
+- if (orig_addr &&
+- (slot_addr(tbl_dma_addr, slot_index) &
+- iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
++ if ((tlb_addr & alloc_align_mask) ||
++ (orig_addr && (tlb_addr & iotlb_align_mask) !=
++ (orig_addr & iotlb_align_mask))) {
+ index = wrap_area_index(pool, index + 1);
+ slots_checked++;
+ continue;
+--
+2.43.0
+
--- /dev/null
+From 3ad143d8fff1bc743dc2028b6abe23fbb52a1732 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Mar 2024 15:28:26 +0000
+Subject: swiotlb: Honour dma_alloc_coherent() alignment in swiotlb_alloc()
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit cbf53074a528191df82b4dba1e3d21191102255e ]
+
+core-api/dma-api-howto.rst states the following properties of
+dma_alloc_coherent():
+
+ | The CPU virtual address and the DMA address are both guaranteed to
+ | be aligned to the smallest PAGE_SIZE order which is greater than or
+ | equal to the requested size.
+
+However, swiotlb_alloc() passes zero for the 'alloc_align_mask'
+parameter of swiotlb_find_slots() and so this property is not upheld.
+Instead, allocations larger than a page are aligned to PAGE_SIZE,
+
+Calculate the mask corresponding to the page order suitable for holding
+the allocation and pass that to swiotlb_find_slots().
+
+Fixes: e81e99bacc9f ("swiotlb: Support aligned swiotlb buffers")
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Petr Tesarik <petr.tesarik1@huawei-partners.com>
+Tested-by: Nicolin Chen <nicolinc@nvidia.com>
+Tested-by: Michael Kelley <mhklinux@outlook.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 8fba61069b84d..2d347685cf566 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -1610,12 +1610,14 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+ struct io_tlb_pool *pool;
+ phys_addr_t tlb_addr;
++ unsigned int align;
+ int index;
+
+ if (!mem)
+ return NULL;
+
+- index = swiotlb_find_slots(dev, 0, size, 0, &pool);
++ align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
++ index = swiotlb_find_slots(dev, 0, size, align, &pool);
+ if (index == -1)
+ return NULL;
+
+--
+2.43.0
+
--- /dev/null
+From 00275ee1c451095077d8e7c799668c7ada44a64f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 16:04:39 -0700
+Subject: x86/fpu: Keep xfd_state in sync with MSR_IA32_XFD
+
+From: Adamos Ttofari <attofari@amazon.de>
+
+[ Upstream commit 10e4b5166df9ff7a2d5316138ca668b42d004422 ]
+
+Commit 672365477ae8 ("x86/fpu: Update XFD state where required") and
+commit 8bf26758ca96 ("x86/fpu: Add XFD state to fpstate") introduced a
+per CPU variable xfd_state to keep the MSR_IA32_XFD value cached, in
+order to avoid unnecessary writes to the MSR.
+
+On CPU hotplug MSR_IA32_XFD is reset to the init_fpstate.xfd, which
+wipes out any stale state. But the per CPU cached xfd value is not
+reset, which brings them out of sync.
+
+As a consequence a subsequent xfd_update_state() might fail to update
+the MSR which in turn can result in XRSTOR raising a #NM in kernel
+space, which crashes the kernel.
+
+To fix this, introduce xfd_set_state() to write xfd_state together
+with MSR_IA32_XFD, and use it in all places that set MSR_IA32_XFD.
+
+Fixes: 672365477ae8 ("x86/fpu: Update XFD state where required")
+Signed-off-by: Adamos Ttofari <attofari@amazon.de>
+Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20240322230439.456571-1-chang.seok.bae@intel.com
+
+Closes: https://lore.kernel.org/lkml/20230511152818.13839-1-attofari@amazon.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/fpu/xstate.c | 5 +++--
+ arch/x86/kernel/fpu/xstate.h | 14 ++++++++++----
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 117e74c44e756..33a214b1a4cec 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -178,10 +178,11 @@ void fpu__init_cpu_xstate(void)
+ * Must happen after CR4 setup and before xsetbv() to allow KVM
+ * lazy passthrough. Write independent of the dynamic state static
+ * key as that does not work on the boot CPU. This also ensures
+- * that any stale state is wiped out from XFD.
++ * that any stale state is wiped out from XFD. Reset the per CPU
++ * xfd cache too.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_XFD))
+- wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
++ xfd_set_state(init_fpstate.xfd);
+
+ /*
+ * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 3518fb26d06b0..19ca623ffa2ac 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
+ #endif
+
+ #ifdef CONFIG_X86_64
++static inline void xfd_set_state(u64 xfd)
++{
++ wrmsrl(MSR_IA32_XFD, xfd);
++ __this_cpu_write(xfd_state, xfd);
++}
++
+ static inline void xfd_update_state(struct fpstate *fpstate)
+ {
+ if (fpu_state_size_dynamic()) {
+ u64 xfd = fpstate->xfd;
+
+- if (__this_cpu_read(xfd_state) != xfd) {
+- wrmsrl(MSR_IA32_XFD, xfd);
+- __this_cpu_write(xfd_state, xfd);
+- }
++ if (__this_cpu_read(xfd_state) != xfd)
++ xfd_set_state(xfd);
+ }
+ }
+
+ extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
+ #else
++static inline void xfd_set_state(u64 xfd) { }
++
+ static inline void xfd_update_state(struct fpstate *fpstate) { }
+
+ static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
+--
+2.43.0
+
--- /dev/null
+From acee706d51329a4056b58fe7aa33e9365dfb52d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Mar 2024 19:56:39 +0100
+Subject: x86/mpparse: Register APIC address only once
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit f2208aa12c27bfada3c15c550c03ca81d42dcac2 ]
+
+The APIC address is registered twice. First during the early detection and
+afterwards when actually scanning the table for APIC IDs. The APIC and
+topology core warn about the second attempt.
+
+Restrict it to the early detection call.
+
+Fixes: 81287ad65da5 ("x86/apic: Sanitize APIC address setup")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/20240322185305.297774848@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/mpparse.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index b223922248e9f..15c700d358700 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -196,12 +196,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
+ if (!smp_check_mpc(mpc, oem, str))
+ return 0;
+
+- /* Initialize the lapic mapping */
+- if (!acpi_lapic)
+- register_lapic_address(mpc->lapic);
+-
+- if (early)
++ if (early) {
++ /* Initialize the lapic mapping */
++ if (!acpi_lapic)
++ register_lapic_address(mpc->lapic);
+ return 1;
++ }
+
+ /* Now process the configuration blocks. */
+ while (count < mpc->length) {
+--
+2.43.0
+