--- /dev/null
+From e30a0361b8515d424c73c67de1a43e45a13b8ba2 Mon Sep 17 00:00:00 2001
+From: Jared Kangas <jkangas@redhat.com>
+Date: Tue, 19 Nov 2024 13:02:34 -0800
+Subject: kasan: make report_lock a raw spinlock
+
+From: Jared Kangas <jkangas@redhat.com>
+
+commit e30a0361b8515d424c73c67de1a43e45a13b8ba2 upstream.
+
+If PREEMPT_RT is enabled, report_lock is a sleeping spinlock and must not
+be locked when IRQs are disabled. However, KASAN reports may be triggered
+in such contexts. For example:
+
+ char *s = kzalloc(1, GFP_KERNEL);
+ kfree(s);
+ local_irq_disable();
+ char c = *s; /* KASAN report here leads to spin_lock() */
+ local_irq_enable();
+
+Make report_lock a raw spinlock to prevent rescheduling when
+PREEMPT_RT is enabled.
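+
+As a reminder of the distinction (illustrative sketch, not part of this
+patch): on PREEMPT_RT a spinlock_t is backed by an rtmutex and may sleep,
+while a raw_spinlock_t always spins and remains safe with IRQs disabled:
+
+ static DEFINE_SPINLOCK(sleeping_lock);    /* rtmutex on PREEMPT_RT */
+ static DEFINE_RAW_SPINLOCK(atomic_lock);  /* true spinlock everywhere */
+
+ local_irq_disable();
+ raw_spin_lock(&atomic_lock);              /* OK: never schedules */
+ raw_spin_unlock(&atomic_lock);
+ spin_lock(&sleeping_lock);                /* invalid on PREEMPT_RT: may sleep */
+ spin_unlock(&sleeping_lock);
+ local_irq_enable();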
+
+Link: https://lkml.kernel.org/r/20241119210234.1602529-1-jkangas@redhat.com
+Fixes: 342a93247e08 ("locking/spinlock: Provide RT variant header: <linux/spinlock_rt.h>")
+Signed-off-by: Jared Kangas <jkangas@redhat.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/report.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -199,7 +199,7 @@ static inline void fail_non_kasan_kunit_
+
+ #endif /* CONFIG_KUNIT */
+
+-static DEFINE_SPINLOCK(report_lock);
++static DEFINE_RAW_SPINLOCK(report_lock);
+
+ static void start_report(unsigned long *flags, bool sync)
+ {
+@@ -210,7 +210,7 @@ static void start_report(unsigned long *
+ lockdep_off();
+ /* Make sure we don't end up in loop. */
+ report_suppress_start();
+- spin_lock_irqsave(&report_lock, *flags);
++ raw_spin_lock_irqsave(&report_lock, *flags);
+ pr_err("==================================================================\n");
+ }
+
+@@ -220,7 +220,7 @@ static void end_report(unsigned long *fl
+ trace_error_report_end(ERROR_DETECTOR_KASAN,
+ (unsigned long)addr);
+ pr_err("==================================================================\n");
+- spin_unlock_irqrestore(&report_lock, *flags);
++ raw_spin_unlock_irqrestore(&report_lock, *flags);
+ if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ check_panic_on_warn("KASAN");
+ switch (kasan_arg_fault) {
--- /dev/null
+From 5c3793604f91123bf49bc792ce697a0bef4c173c Mon Sep 17 00:00:00 2001
+From: Kees Cook <kees@kernel.org>
+Date: Sun, 17 Nov 2024 03:38:13 -0800
+Subject: lib: stackinit: hide never-taken branch from compiler
+
+From: Kees Cook <kees@kernel.org>
+
+commit 5c3793604f91123bf49bc792ce697a0bef4c173c upstream.
+
+The never-taken branch leads to an invalid bounds condition, which is by
+design. To avoid the unwanted warning from the compiler, hide the
+variable from the optimizer.
+
+../lib/stackinit_kunit.c: In function 'do_nothing_u16_zero':
+../lib/stackinit_kunit.c:51:49: error: array subscript 1 is outside array bounds of 'u16[0]' {aka 'short unsigned int[]'} [-Werror=array-bounds=]
+ 51 | #define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
+ | ^~~~~~
+../lib/stackinit_kunit.c:219:24: note: in expansion of macro 'DO_NOTHING_RETURN_SCALAR'
+ 219 | return DO_NOTHING_RETURN_ ## which(ptr + 1); \
+ | ^~~~~~~~~~~~~~~~~~
+
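+For reference, OPTIMIZER_HIDE_VAR() (include/linux/compiler.h) launders the
+value through an empty asm statement so the optimizer can no longer reason
+about it; roughly (sketch, not the verbatim kernel definition):
+
+ /* "var" goes into and comes back out of an empty asm, so the compiler
+  * must assume it may have changed and drops its bounds knowledge. */
+ #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+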
+Link: https://lkml.kernel.org/r/20241117113813.work.735-kees@kernel.org
+Signed-off-by: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/stackinit_kunit.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/stackinit_kunit.c
++++ b/lib/stackinit_kunit.c
+@@ -199,6 +199,7 @@ static noinline void test_ ## name (stru
+ static noinline DO_NOTHING_TYPE_ ## which(var_type) \
+ do_nothing_ ## name(var_type *ptr) \
+ { \
++ OPTIMIZER_HIDE_VAR(ptr); \
+ /* Will always be true, but compiler doesn't know. */ \
+ if ((unsigned long)ptr > 0x2) \
+ return DO_NOTHING_RETURN_ ## which(ptr); \
--- /dev/null
+From 914eec5e980171bc128e7e24f7a22aa1d803570e Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Tue, 19 Nov 2024 09:45:00 -0800
+Subject: ocfs2: update seq_file index in ocfs2_dlm_seq_next
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit 914eec5e980171bc128e7e24f7a22aa1d803570e upstream.
+
+The following INFO level message was seen:
+
+seq_file: buggy .next function ocfs2_dlm_seq_next [ocfs2] did not
+update position index
+
+Fix:
+Update *pos (and thus m->index) to keep seq_read_iter() happy, even though
+the index itself is meaningless to ocfs2_dlm_seq_next().
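+
+For context, seq_read_iter() expects every seq_file .next operation to
+advance the position; a minimal sketch of the expected shape (the names
+below are illustrative, not ocfs2 code):
+
+ static void *foo_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+         (*pos)++;                  /* keep m->index in sync */
+         return foo_next_record(v); /* hypothetical; NULL ends the walk */
+ }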
+
+Link: https://lkml.kernel.org/r/20241119174500.9198-1-wen.gang.wang@oracle.com
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/dlmglue.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3110,6 +3110,7 @@ static void *ocfs2_dlm_seq_next(struct s
+ struct ocfs2_lock_res *iter = v;
+ struct ocfs2_lock_res *dummy = &priv->p_iter_res;
+
++ (*pos)++;
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ iter = ocfs2_dlm_next_res(iter, priv);
+ list_del_init(&dummy->l_debug_list);
regmap-detach-regmap-from-dev-on-regmap_exit.patch
mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch
mmc-core-further-prevent-card-detect-during-shutdown.patch
+ocfs2-update-seq_file-index-in-ocfs2_dlm_seq_next.patch
+lib-stackinit-hide-never-taken-branch-from-compiler.patch
+kasan-make-report_lock-a-raw-spinlock.patch
+x86-mm-add-_page_noptishadow-bit-to-avoid-updating-userspace-page-tables.patch
--- /dev/null
+From d0ceea662d459726487030237689835fcc0483e5 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Wed, 4 Dec 2024 11:27:14 +0000
+Subject: x86/mm: Add _PAGE_NOPTISHADOW bit to avoid updating userspace page tables
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit d0ceea662d459726487030237689835fcc0483e5 upstream.
+
+The set_p4d() and set_pgd() functions (in 4-level or 5-level page table setups
+respectively) assume that the root page table is actually an 8KiB allocation,
+with the userspace root immediately after the kernel root page table (so that
+the former can enforce NX on all the subordinate page tables, which are
+actually shared).
+
+However, users of the kernel_ident_mapping_init() code do not give it an 8KiB
+allocation for its PGD. Both swsusp_arch_resume() and acpi_mp_setup_reset()
+allocate only a single 4KiB page. The kexec code on x86_64 currently gets
+away with it purely by chance, because it allocates 8KiB for its "control
+code page" and then actually uses the first half for the PGD, then copies the
+actual trampoline code into the second half only after the identmap code has
+finished scribbling over it.
+
+Fix this by defining a _PAGE_NOPTISHADOW bit (which can use the same bit as
+_PAGE_SAVED_DIRTY, since one is only for the PGD/P4D root and the other is
+exclusively for leaf PTEs). This instructs __pti_set_user_pgtbl() not to
+write to the userspace 'shadow' PGD.
+
+Strictly, the _PAGE_NOPTISHADOW bit doesn't need to be written out to the
+actual page tables; since __pti_set_user_pgtbl() returns the value to be
+written to the kernel page table, it could be filtered out. But there seems
+to be no benefit to actually doing so.
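+
+For illustration only (not part of this patch): with PTI the user copy of the
+PGD is assumed to live in the page immediately after the kernel PGD, which is
+exactly what a single 4KiB allocation cannot provide (the name 'alloc' below
+is illustrative):
+
+ pgd_t *kernel_pgd = alloc;                     /* the single 4KiB page allocated */
+ pgd_t *user_pgd   = kernel_pgd + PTRS_PER_PGD; /* one page past that allocation */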
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/412c90a4df7aef077141d9f68d19cbe5602d6c6d.camel@infradead.org
+Cc: stable@kernel.org
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable_types.h | 8 ++++++--
+ arch/x86/mm/ident_map.c | 6 +++---
+ arch/x86/mm/pti.c | 2 +-
+ 3 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -36,10 +36,12 @@
+ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
+
+ #ifdef CONFIG_X86_64
+-#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
++#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
++#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
+ #else
+ /* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
+-#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit */
++#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
++#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
+ #endif
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -139,6 +141,8 @@
+
+ #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
++#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)
++
+ /*
+ * Set of bits not changed in pte_modify. The pte's
+ * protection key is treated like _PAGE_RW, for
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -88,7 +88,7 @@ static int ident_p4d_init(struct x86_map
+ if (result)
+ return result;
+
+- set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
++ set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ }
+
+ return 0;
+@@ -132,14 +132,14 @@ int kernel_ident_mapping_init(struct x86
+ if (result)
+ return result;
+ if (pgtable_l5_enabled()) {
+- set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
++ set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ } else {
+ /*
+ * With p4d folded, pgd is equal to p4d.
+ * The pgd entry has to point to the pud page table in this case.
+ */
+ pud_t *pud = pud_offset(p4d, 0);
+- set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
++ set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ }
+ }
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp,
+ * Top-level entries added to init_mm's usermode pgd after boot
+ * will not be automatically propagated to other mms.
+ */
+- if (!pgdp_maps_userspace(pgdp))
++ if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
+ return pgd;
+
+ /*