--- /dev/null
+From 139d42ca51018c1d43ab5f35829179f060d1ab31 Mon Sep 17 00:00:00 2001
+From: Kanglong Wang <wangkanglong@loongson.cn>
+Date: Tue, 12 Nov 2024 16:35:39 +0800
+Subject: LoongArch: Add WriteCombine shadow mapping in KASAN
+
+From: Kanglong Wang <wangkanglong@loongson.cn>
+
+commit 139d42ca51018c1d43ab5f35829179f060d1ab31 upstream.
+
+Currently, the kernel cannot boot when ARCH_IOREMAP, ARCH_WRITECOMBINE
+and KASAN are enabled together, because DMW2 (configured as
+0xa000000000000000 for WriteCombine) is now used by the kernel, but
+KASAN has no segment mapping for it. This patch fixes the issue.
+
+Solution: Add the relevant definitions for WriteCombine (DMW2) in KASAN.
+
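+As a minimal user-space sketch of the segment selection involved (the
+segment values come from the hunk below; XRANGE_SHIFT is taken as 48,
+matching the 16-bit segment extraction done in kasan_init.c, and the
+example address is illustrative):
+
+  #include <stdio.h>
+
+  #define XRANGE_SHIFT     48      /* top 16 bits select the segment */
+  #define XKPRANGE_UC_SEG  0x8000  /* DMW0: UnCached */
+  #define XKPRANGE_CC_SEG  0x9000  /* DMW1: Cached */
+  #define XKPRANGE_WC_SEG  0xa000  /* DMW2: WriteCombine (new) */
+
+  int main(void)
+  {
+          unsigned long wc_addr = 0xa000000000001000UL;
+          unsigned long seg = (wc_addr >> XRANGE_SHIFT) & 0xffff;
+
+          /* Before this patch, kasan_mem_to_shadow() had no 0xa000
+           * case, so a WriteCombine address hit the WARN_ON() default
+           * and got no shadow mapping at all. */
+          printf("segment 0x%lx %s\n", seg,
+                 seg == XKPRANGE_WC_SEG ? "-> WC shadow offset" : "-> ???");
+          return 0;
+  }
+
+Note how the hunk below also chains the shadow regions: the new WC
+shadow starts at XKPRANGE_UC_SHADOW_END, and the VMALLOC shadow offset
+is rebased from XKPRANGE_UC_SHADOW_END to XKPRANGE_WC_SHADOW_END so the
+two regions do not overlap.
+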
+Cc: stable@vger.kernel.org
+Fixes: 8e02c3b782ec ("LoongArch: Add writecombine support for DMW-based ioremap()")
+Signed-off-by: Kanglong Wang <wangkanglong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/kasan.h | 11 ++++++++++-
+ arch/loongarch/mm/kasan_init.c | 5 +++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/include/asm/kasan.h
++++ b/arch/loongarch/include/asm/kasan.h
+@@ -25,6 +25,7 @@
+ /* 64-bit segment value. */
+ #define XKPRANGE_UC_SEG (0x8000)
+ #define XKPRANGE_CC_SEG (0x9000)
++#define XKPRANGE_WC_SEG (0xa000)
+ #define XKVRANGE_VC_SEG (0xffff)
+
+ /* Cached */
+@@ -41,10 +42,17 @@
+ #define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+ #define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
++/* WriteCombine */
++#define XKPRANGE_WC_START WRITECOMBINE_BASE
++#define XKPRANGE_WC_SIZE XRANGE_SIZE
++#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
++#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
++#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
++
+ /* VMALLOC (Cached or UnCached) */
+ #define XKVRANGE_VC_START MODULES_VADDR
+ #define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+-#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
++#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
+ #define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+ #define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+@@ -55,6 +63,7 @@
+
+ #define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+ #define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
++#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
+ #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+ extern bool kasan_early_stage;
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -62,6 +62,9 @@ void *kasan_mem_to_shadow(const void *ad
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
++ case XKPRANGE_WC_SEG:
++ offset = XKPRANGE_WC_SHADOW_OFFSET;
++ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+@@ -86,6 +89,8 @@ const void *kasan_shadow_to_mem(const vo
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
++ else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
++ return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
--- /dev/null
+From 5ec5ac4ca27e4daa234540ac32f9fc5219377d53 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Tue, 10 Feb 2026 19:31:17 +0800
+Subject: LoongArch: Rework KASAN initialization for PTW-enabled systems
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit 5ec5ac4ca27e4daa234540ac32f9fc5219377d53 upstream.
+
+kasan_init_generic() indicates that KASAN is fully initialized, so it
+should be put at the end of kasan_init().
+
+Otherwise, bringing up the primary CPU fails when CONFIG_KASAN is set
+on PTW-enabled systems. Here is the call chain:
+
+  kernel_entry()
+    start_kernel()
+      setup_arch()
+        kasan_init()
+          kasan_init_generic()
+
+The reason is that PTW-enabled systems perform speculative accesses,
+which means memory accesses to the shadow memory placed after
+kasan_init() may be executed by the hardware before that point.
+However, accessing shadow memory is safe only after KASAN is fully
+initialized, because kasan_init() uses a temporary PGD table until all
+levels of shadow page tables have been populated and the PGD register
+has been written. Moving kasan_init_generic() to the end defers the
+point at which kasan_enabled() becomes true, thereby avoiding
+speculative accesses to the shadow pages.
+
+After moving kasan_init_generic() to the end, kasan_init() can no
+longer call kasan_mem_to_shadow() for shadow address conversion,
+because it would always return kasan_early_shadow_page. On the other
+hand, the current logic of kasan_mem_to_shadow() should be kept for
+both the early and final stages, because there may be instrumented
+code running before kasan_init().
+
+To solve this, we factor out a new mem_to_shadow() function from the
+current kasan_mem_to_shadow() to perform the shadow address conversion
+in kasan_init().
+
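+Simplified from the diff below, the resulting split looks like this
+(kasan_arch_is_ready() being the readiness check this backport uses in
+place of upstream's kasan_enabled()):
+
+  /* Unconditional conversion: only for kasan_init() itself, which
+   * knows the shadow page tables it has just populated are valid. */
+  static void *mem_to_shadow(const void *addr);
+
+  /* Gated wrapper: instrumented code that runs before KASAN is ready
+   * is redirected to the early shadow page instead. */
+  void *kasan_mem_to_shadow(const void *addr)
+  {
+          if (kasan_arch_is_ready())
+                  return mem_to_shadow(addr);
+          else
+                  return (void *)(kasan_early_shadow_page);
+  }
+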
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+[ Huacai: To backport from upstream to 6.6 & 6.12, kasan_enabled() is
+ replaced with kasan_arch_is_ready() and kasan_init_generic()
+ is replaced with "kasan_early_stage = false". ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/mm/kasan_init.c | 77 +++++++++++++++++++++--------------------
+ 1 file changed, 40 insertions(+), 37 deletions(-)
+
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -42,39 +42,43 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD]
+
+ bool kasan_early_stage = true;
+
+-void *kasan_mem_to_shadow(const void *addr)
++static void *mem_to_shadow(const void *addr)
+ {
+- if (!kasan_arch_is_ready()) {
++ unsigned long offset = 0;
++ unsigned long maddr = (unsigned long)addr;
++ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
++
++ if (maddr >= FIXADDR_START)
+ return (void *)(kasan_early_shadow_page);
+- } else {
+- unsigned long maddr = (unsigned long)addr;
+- unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+- unsigned long offset = 0;
+-
+- if (maddr >= FIXADDR_START)
+- return (void *)(kasan_early_shadow_page);
+-
+- maddr &= XRANGE_SHADOW_MASK;
+- switch (xrange) {
+- case XKPRANGE_CC_SEG:
+- offset = XKPRANGE_CC_SHADOW_OFFSET;
+- break;
+- case XKPRANGE_UC_SEG:
+- offset = XKPRANGE_UC_SHADOW_OFFSET;
+- break;
+- case XKPRANGE_WC_SEG:
+- offset = XKPRANGE_WC_SHADOW_OFFSET;
+- break;
+- case XKVRANGE_VC_SEG:
+- offset = XKVRANGE_VC_SHADOW_OFFSET;
+- break;
+- default:
+- WARN_ON(1);
+- return NULL;
+- }
+
+- return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
++ maddr &= XRANGE_SHADOW_MASK;
++ switch (xrange) {
++ case XKPRANGE_CC_SEG:
++ offset = XKPRANGE_CC_SHADOW_OFFSET;
++ break;
++ case XKPRANGE_UC_SEG:
++ offset = XKPRANGE_UC_SHADOW_OFFSET;
++ break;
++ case XKPRANGE_WC_SEG:
++ offset = XKPRANGE_WC_SHADOW_OFFSET;
++ break;
++ case XKVRANGE_VC_SEG:
++ offset = XKVRANGE_VC_SHADOW_OFFSET;
++ break;
++ default:
++ WARN_ON(1);
++ return NULL;
+ }
++
++ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
++}
++
++void *kasan_mem_to_shadow(const void *addr)
++{
++ if (kasan_arch_is_ready())
++ return mem_to_shadow(addr);
++ else
++ return (void *)(kasan_early_shadow_page);
+ }
+
+ const void *kasan_shadow_to_mem(const void *shadow_addr)
+@@ -295,10 +299,8 @@ void __init kasan_init(void)
+ /* Maps everything to a single page of zeroes */
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
+
+- kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+- kasan_mem_to_shadow((void *)KFENCE_AREA_END));
+-
+- kasan_early_stage = false;
++ kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
++ mem_to_shadow((void *)KFENCE_AREA_END));
+
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &pa_start, &pa_end) {
+@@ -308,13 +310,13 @@ void __init kasan_init(void)
+ if (start >= end)
+ break;
+
+- kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+- (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
++ kasan_map_populate((unsigned long)mem_to_shadow(start),
++ (unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
+ }
+
+ /* Populate modules mapping */
+- kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
+- (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
++ kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
++ (unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
+ /*
+ * KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+@@ -329,5 +331,6 @@ void __init kasan_init(void)
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
++ kasan_early_stage = false;
+ pr_info("KernelAddressSanitizer initialized.\n");
+ }