git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.2-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 23 Apr 2023 10:32:09 +0000 (12:32 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 23 Apr 2023 10:32:09 +0000 (12:32 +0200)
added patches:
cifs-avoid-dup-prefix-path-in-dfs_get_automount_devname.patch
kvm-arm64-fix-buffer-overflow-in-kvm_arm_set_fw_reg.patch
kvm-arm64-make-vcpu-flag-updates-non-preemptible.patch
loongarch-make-mstrict-align-configurable.patch
loongarch-make-writecombine-configurable-for-ioremap.patch
mips-define-runtime_discard_exit-in-ld-script.patch

queue-6.2/cifs-avoid-dup-prefix-path-in-dfs_get_automount_devname.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-fix-buffer-overflow-in-kvm_arm_set_fw_reg.patch [new file with mode: 0644]
queue-6.2/kvm-arm64-make-vcpu-flag-updates-non-preemptible.patch [new file with mode: 0644]
queue-6.2/loongarch-make-mstrict-align-configurable.patch [new file with mode: 0644]
queue-6.2/loongarch-make-writecombine-configurable-for-ioremap.patch [new file with mode: 0644]
queue-6.2/mips-define-runtime_discard_exit-in-ld-script.patch [new file with mode: 0644]
queue-6.2/series

diff --git a/queue-6.2/cifs-avoid-dup-prefix-path-in-dfs_get_automount_devname.patch b/queue-6.2/cifs-avoid-dup-prefix-path-in-dfs_get_automount_devname.patch
new file mode 100644
index 0000000..d514785
--- /dev/null
@@ -0,0 +1,94 @@
+From d5a863a153e90996ab2aef6b9e08d509f4d5662b Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Sun, 16 Apr 2023 15:38:28 -0300
+Subject: cifs: avoid dup prefix path in dfs_get_automount_devname()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit d5a863a153e90996ab2aef6b9e08d509f4d5662b upstream.
+
+@server->origin_fullpath already contains the tree name + optional
+prefix, so avoid calling __build_path_from_dentry_optional_prefix() as
+it might end up duplicating the prefix path from @cifs_sb->prepath into
+the final full path.
+
+Instead, generate the DFS full path by simply merging
+@server->origin_fullpath with the dentry's path.
+
+This fixes the following case:
+
+       mount.cifs //root/dfs/dir /mnt/ -o ...
+       ls /mnt/link
+
+where cifs_dfs_do_automount() will call smb3_parse_devname() with
+@devname set to "//root/dfs/dir/link" instead of
+"//root/dfs/dir/dir/link".
+
+Fixes: 7ad54b98fc1f ("cifs: use origin fullpath for automounts")
+Cc: <stable@vger.kernel.org> # 6.2+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/cifs_dfs_ref.c |  2 --
+ fs/cifs/dfs.h          | 22 ++++++++++++++++++----
+ 2 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index cb40074feb3e..0329a907bdfe 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -171,8 +171,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
+               mnt = ERR_CAST(full_path);
+               goto out;
+       }
+-
+-      convert_delimiter(full_path, '/');
+       cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
+       tmp = *cur_ctx;
+diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h
+index 13f26e01f7b9..0b8cbf721fff 100644
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -34,19 +34,33 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
+                             cifs_remap(cifs_sb), path, ref, tl);
+ }
++/* Return DFS full path out of a dentry set for automount */
+ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ {
+       struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct TCP_Server_Info *server = tcon->ses->server;
++      size_t len;
++      char *s;
+       if (unlikely(!server->origin_fullpath))
+               return ERR_PTR(-EREMOTE);
+-      return __build_path_from_dentry_optional_prefix(dentry, page,
+-                                                      server->origin_fullpath,
+-                                                      strlen(server->origin_fullpath),
+-                                                      true);
++      s = dentry_path_raw(dentry, page, PATH_MAX);
++      if (IS_ERR(s))
++              return s;
++      /* for root, we want "" */
++      if (!s[1])
++              s++;
++
++      len = strlen(server->origin_fullpath);
++      if (s < (char *)page + len)
++              return ERR_PTR(-ENAMETOOLONG);
++
++      s -= len;
++      memcpy(s, server->origin_fullpath, len);
++      convert_delimiter(s, '/');
++      return s;
+ }
+ static inline void dfs_put_root_smb_sessions(struct list_head *head)
+-- 
+2.40.0
+
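The new dfs_get_automount_devname() works because dentry_path_raw()
writes the dentry's path at the *end* of the page buffer and returns a
pointer into it, which leaves room to copy the prefix directly in front
of the returned string. A minimal user-space model of that right-aligned
merge (hypothetical helper name and buffer size, not the kernel code):

/* Build "<prefix><suffix>" in one buffer without a second allocation:
 * right-align the suffix first, then copy the prefix in front of it,
 * mirroring dentry_path_raw() plus the memcpy()/convert_delimiter()
 * sequence in the patch above. */
#include <stdio.h>
#include <string.h>

#define BUFSZ 4096			/* stands in for the page buffer */

static char *merge_paths(char *buf, const char *prefix, const char *suffix)
{
	size_t plen = strlen(prefix);
	size_t slen = strlen(suffix);
	char *s;

	if (slen + 1 > BUFSZ)
		return NULL;
	s = buf + BUFSZ - slen - 1;
	memcpy(s, suffix, slen + 1);	/* right-aligned, '\0' included */
	if (s < buf + plen)
		return NULL;		/* -ENAMETOOLONG in the kernel */
	s -= plen;
	memcpy(s, prefix, plen);	/* prepend, no terminator copied */
	for (char *q = s; *q; q++)	/* convert_delimiter(s, '/') */
		if (*q == '\\')
			*q = '/';
	return s;
}

int main(void)
{
	char buf[BUFSZ];
	char *p = merge_paths(buf, "\\\\root\\dfs\\dir", "/link");

	if (p)
		printf("%s\n", p);	/* //root/dfs/dir/link */
	return 0;
}

This is also why the old convert_delimiter() call in
cifs_dfs_do_automount() could be dropped: the delimiter conversion now
happens once, on the merged string.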
diff --git a/queue-6.2/kvm-arm64-fix-buffer-overflow-in-kvm_arm_set_fw_reg.patch b/queue-6.2/kvm-arm64-fix-buffer-overflow-in-kvm_arm_set_fw_reg.patch
new file mode 100644
index 0000000..0b9bee6
--- /dev/null
@@ -0,0 +1,36 @@
+From a25bc8486f9c01c1af6b6c5657234b2eee2c39d6 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Wed, 19 Apr 2023 13:16:13 +0300
+Subject: KVM: arm64: Fix buffer overflow in kvm_arm_set_fw_reg()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit a25bc8486f9c01c1af6b6c5657234b2eee2c39d6 upstream.
+
+KVM_REG_SIZE() comes from the ioctl and can be any power of two between
+1 and 32768, but if it is larger than sizeof(long) this will corrupt
+memory.
+
+Fixes: 99adb567632b ("KVM: arm/arm64: Add save/restore support for firmware workaround state")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/4efbab8c-640f-43b2-8ac6-6d68e08280fe@kili.mountain
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/hypercalls.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *
+       u64 val;
+       int wa_level;
++      if (KVM_REG_SIZE(reg->id) != sizeof(val))
++              return -ENOENT;
+       if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
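The overflow is easier to see knowing that KVM_REG_SIZE() decodes the
register size from bits 55:52 of the user-supplied register id (the mask
and shift below follow include/uapi/linux/kvm.h). A user-space sketch of
the decoding and of why the two added lines are sufficient, with
memcpy() standing in for copy_from_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KVM_REG_SIZE_SHIFT	52
#define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
#define KVM_REG_SIZE(id) \
	(1ULL << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

static int set_fw_reg(uint64_t id, const void *uaddr)
{
	uint64_t val;

	/* the fix: reject every size other than a u64's */
	if (KVM_REG_SIZE(id) != sizeof(val))
		return -2;			/* -ENOENT in the kernel */

	/* without the check above, a size field of 15 would copy
	 * 1 << 15 = 32768 bytes into this 8-byte stack variable */
	memcpy(&val, uaddr, KVM_REG_SIZE(id));
	return 0;
}

int main(void)
{
	uint64_t v = 0;
	uint64_t ok_id  = 3ULL  << KVM_REG_SIZE_SHIFT;	/* 1 << 3 = 8 bytes */
	uint64_t bad_id = 15ULL << KVM_REG_SIZE_SHIFT;	/* 32768 bytes */

	printf("8-byte reg: %d\n", set_fw_reg(ok_id, &v));	/* 0 */
	printf("huge reg:   %d\n", set_fw_reg(bad_id, &v));	/* -2 */
	return 0;
}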
diff --git a/queue-6.2/kvm-arm64-make-vcpu-flag-updates-non-preemptible.patch b/queue-6.2/kvm-arm64-make-vcpu-flag-updates-non-preemptible.patch
new file mode 100644
index 0000000..33cff4f
--- /dev/null
@@ -0,0 +1,91 @@
+From 35dcb3ac663a16510afc27ba2725d70c15e012a5 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 18 Apr 2023 13:57:37 +0100
+Subject: KVM: arm64: Make vcpu flag updates non-preemptible
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 35dcb3ac663a16510afc27ba2725d70c15e012a5 upstream.
+
+Per-vcpu flags are updated using a non-atomic RMW operation, which
+means it is possible to get preempted between the read and write
+operations.
+
+Another interesting thing to note is that preemption also updates
+flags, as we have some flag manipulation in both the load and put
+operations.
+
+It is thus possible to lose information communicated by either
+load or put, as the preempted flag update will overwrite the flags
+when the thread is resumed. This is especially critical if either
+load or put has stored information which depends on the physical
+CPU the vcpu runs on.
+
+This results in really elusive bugs, and kudos must be given to
+Mostafa for the long hours of debugging, and finally spotting
+the problem.
+
+Fix it by disabling preemption during the RMW operation, which
+ensures that the state stays consistent. Also upgrade the
+vcpu_get_flag path to use READ_ONCE() to make sure the field is
+always accessed atomically.
+
+Fixes: e87abb73e594 ("KVM: arm64: Add helpers to manipulate vcpu flags among a set")
+Reported-by: Mostafa Saleh <smostafa@google.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230418125737.2327972-1-maz@kernel.org
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |   19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -533,9 +533,22 @@ struct kvm_vcpu_arch {
+       ({                                                      \
+               __build_check_flag(v, flagset, f, m);           \
+                                                               \
+-              v->arch.flagset & (m);                          \
++              READ_ONCE(v->arch.flagset) & (m);               \
+       })
++/*
++ * Note that the set/clear accessors must be preempt-safe in order to
++ * avoid nesting them with load/put which also manipulate flags...
++ */
++#ifdef __KVM_NVHE_HYPERVISOR__
++/* the nVHE hypervisor is always non-preemptible */
++#define __vcpu_flags_preempt_disable()
++#define __vcpu_flags_preempt_enable()
++#else
++#define __vcpu_flags_preempt_disable()        preempt_disable()
++#define __vcpu_flags_preempt_enable() preempt_enable()
++#endif
++
+ #define __vcpu_set_flag(v, flagset, f, m)                     \
+       do {                                                    \
+               typeof(v->arch.flagset) *fset;                  \
+@@ -543,9 +556,11 @@ struct kvm_vcpu_arch {
+               __build_check_flag(v, flagset, f, m);           \
+                                                               \
+               fset = &v->arch.flagset;                        \
++              __vcpu_flags_preempt_disable();                 \
+               if (HWEIGHT(m) > 1)                             \
+                       *fset &= ~(m);                          \
+               *fset |= (f);                                   \
++              __vcpu_flags_preempt_enable();                  \
+       } while (0)
+ #define __vcpu_clear_flag(v, flagset, f, m)                   \
+@@ -555,7 +570,9 @@ struct kvm_vcpu_arch {
+               __build_check_flag(v, flagset, f, m);           \
+                                                               \
+               fset = &v->arch.flagset;                        \
++              __vcpu_flags_preempt_disable();                 \
+               *fset &= ~(m);                                  \
++              __vcpu_flags_preempt_enable();                  \
+       } while (0)
+ #define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
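The failure mode is a classic lost update: the load/put path runs in the
preemption window and modifies the flags, and the resuming thread's
write-back then clobbers that modification. A deterministic user-space
replay of the interleaving (hypothetical flag names; a direct function
call stands in for being preempted):

#include <stdio.h>

#define FLAG_A	0x1U	/* set by the vcpu thread */
#define FLAG_B	0x2U	/* set by the load/put (preemption) path */

static unsigned int flags;

static void preempt_load_put(void)
{
	flags |= FLAG_B;
}

static void set_flag_racy(unsigned int f)
{
	unsigned int tmp = flags;	/* read ... */
	preempt_load_put();		/* ... preempted, flags change ... */
	flags = tmp | f;		/* ... write-back: FLAG_B is lost */
}

int main(void)
{
	set_flag_racy(FLAG_A);
	printf("racy:  %#x (FLAG_B lost)\n", flags);	/* 0x1 */

	/* the fix makes the RMW non-preemptible, so load/put can only
	 * run before or after it (preempt_disable()/preempt_enable()
	 * in the kernel) and both updates survive */
	flags = 0;
	preempt_load_put();
	flags |= FLAG_A;
	printf("fixed: %#x\n", flags);			/* 0x3 */
	return 0;
}

The READ_ONCE() in __vcpu_get_flag() addresses the read side of the same
problem: readers always see a whole, untorn value of the flag field.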
diff --git a/queue-6.2/loongarch-make-mstrict-align-configurable.patch b/queue-6.2/loongarch-make-mstrict-align-configurable.patch
new file mode 100644
index 0000000..bfee2a4
--- /dev/null
@@ -0,0 +1,136 @@
+From 41596803302d83a67a80dc1efef4e51ac46acabb Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Sat, 25 Feb 2023 15:52:56 +0800
+Subject: LoongArch: Make -mstrict-align configurable
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 41596803302d83a67a80dc1efef4e51ac46acabb upstream.
+
+Introduce Kconfig option ARCH_STRICT_ALIGN to make -mstrict-align
+configurable.
+
+Not all LoongArch cores support h/w unaligned access, so we can use the
+-mstrict-align build parameter to prevent unaligned accesses.
+
+CPUs with h/w unaligned access support:
+Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+
+CPUs without h/w unaligned access support:
+Loongson-2K500/2K1000.
+
+This option is enabled by default so that the kernel is able to run on
+all LoongArch systems. But you can disable it manually if you want to
+run the kernel only on systems with h/w unaligned access support, in
+order to optimise for performance.
+
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/Kconfig         |   19 +++++++++++++++++++
+ arch/loongarch/Makefile        |    5 +++++
+ arch/loongarch/kernel/Makefile |    4 +++-
+ arch/loongarch/kernel/traps.c  |    9 +++++++--
+ 4 files changed, 34 insertions(+), 3 deletions(-)
+
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -94,6 +94,7 @@ config LOONGARCH
+       select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_EBPF_JIT
++      select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
+       select HAVE_EXIT_THREAD
+       select HAVE_FAST_GUP
+       select HAVE_FTRACE_MCOUNT_RECORD
+@@ -441,6 +442,24 @@ config ARCH_IOREMAP
+         protection support. However, you can enable LoongArch DMW-based
+         ioremap() for better performance.
++config ARCH_STRICT_ALIGN
++      bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
++      default y
++      help
++        Not all LoongArch cores support h/w unaligned access, we can use
++        -mstrict-align build parameter to prevent unaligned accesses.
++
++        CPUs with h/w unaligned access support:
++        Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
++
++        CPUs without h/w unaligned access support:
++        Loongson-2K500/2K1000.
++
++        This option is enabled by default to make the kernel be able to run
++        on all LoongArch systems. But you can disable it manually if you want
++        to run kernel only on systems with h/w unaligned access support in
++        order to optimise for performance.
++
+ config KEXEC
+       bool "Kexec system call"
+       select KEXEC_CORE
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -91,10 +91,15 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRES
+ # instead of .eh_frame so we don't discard them.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++ifdef CONFIG_ARCH_STRICT_ALIGN
+ # Don't emit unaligned accesses.
+ # Not all LoongArch cores support unaligned access, and as kernel we can't
+ # rely on others to provide emulation for these accesses.
+ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
++else
++# Optimise for performance on hardware supports unaligned access.
++KBUILD_CFLAGS += $(call cc-option,-mno-strict-align)
++endif
+ KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+--- a/arch/loongarch/kernel/Makefile
++++ b/arch/loongarch/kernel/Makefile
+@@ -8,13 +8,15 @@ extra-y              := vmlinux.lds
+ obj-y         += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+                  traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+                  elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
+-                 alternative.o unaligned.o unwind.o
++                 alternative.o unwind.o
+ obj-$(CONFIG_ACPI)            += acpi.o
+ obj-$(CONFIG_EFI)             += efi.o
+ obj-$(CONFIG_CPU_HAS_FPU)     += fpu.o
++obj-$(CONFIG_ARCH_STRICT_ALIGN)       += unaligned.o
++
+ ifdef CONFIG_FUNCTION_TRACER
+   ifndef CONFIG_DYNAMIC_FTRACE
+     obj-y += mcount.o ftrace.o
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -371,9 +371,14 @@ int no_unaligned_warning __read_mostly =
+ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ {
+-      unsigned int *pc;
+       irqentry_state_t state = irqentry_enter(regs);
++#ifndef CONFIG_ARCH_STRICT_ALIGN
++      die_if_kernel("Kernel ale access", regs);
++      force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
++#else
++      unsigned int *pc;
++
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+       /*
+@@ -397,8 +402,8 @@ asmlinkage void noinstr do_ale(struct pt
+ sigbus:
+       die_if_kernel("Kernel ale access", regs);
+       force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+-
+ out:
++#endif
+       irqentry_exit(regs, state);
+ }
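What the flag changes can be seen in a few lines of C: with
-mno-strict-align the compiler is free to fold the memcpy() below into a
single (possibly unaligned) 32-bit load, which traps into do_ale() on
cores without hardware unaligned-access support; with -mstrict-align it
must emit byte-wise accesses instead. A hedged, compiler-agnostic sketch
(hypothetical example, not taken from the patch):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read a native-endian 32-bit value from an arbitrarily aligned offset. */
uint32_t read_u32(const uint8_t *buf, size_t off)
{
	uint32_t v;

	/* -mno-strict-align: typically one 32-bit load at buf + off.
	 * -mstrict-align:    byte loads plus shifts, never a trap. */
	memcpy(&v, buf + off, sizeof(v));
	return v;
}

This also explains the Makefile and traps.c hunks: only an
ARCH_STRICT_ALIGN=y kernel, which may run on cores lacking the hardware
support, needs the unaligned.o emulator behind do_ale(); a non-strict
kernel assumes the hardware handles it and simply raises SIGBUS.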
diff --git a/queue-6.2/loongarch-make-writecombine-configurable-for-ioremap.patch b/queue-6.2/loongarch-make-writecombine-configurable-for-ioremap.patch
new file mode 100644
index 0000000..f345fdc
--- /dev/null
@@ -0,0 +1,130 @@
+From 16c52e503043aed1e2a2ce38d9249de5936c1f6b Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Tue, 18 Apr 2023 19:38:58 +0800
+Subject: LoongArch: Make WriteCombine configurable for ioremap()
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 16c52e503043aed1e2a2ce38d9249de5936c1f6b upstream.
+
+LoongArch maintains cache coherency in hardware, but when paired with
+LS7A chipsets the WUC attribute (Weak-ordered UnCached, which is similar
+to WriteCombine) is out of the scope of the cache coherency mechanism for
+PCIe devices (this is a PCIe protocol violation, which may be fixed in
+newer chipsets).
+
+This means WUC can only be used for write-only memory regions now, so
+this option is disabled by default, making WUC silently fall back to
+SUC for ioremap(). You can enable this option if the kernel is
+guaranteed to run on hardware without this bug.
+
+The kernel parameter writecombine=on/off can be used to override the
+Kconfig option.
+
+Cc: stable@vger.kernel.org
+Suggested-by: WANG Xuerui <kernel@xen0n.name>
+Reviewed-by: WANG Xuerui <kernel@xen0n.name>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.rst |    1 +
+ Documentation/admin-guide/kernel-parameters.txt |    6 ++++++
+ arch/loongarch/Kconfig                          |   16 ++++++++++++++++
+ arch/loongarch/include/asm/io.h                 |    4 +++-
+ arch/loongarch/kernel/setup.c                   |   21 +++++++++++++++++++++
+ 5 files changed, 47 insertions(+), 1 deletion(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.rst
++++ b/Documentation/admin-guide/kernel-parameters.rst
+@@ -128,6 +128,7 @@ parameter is applicable::
+       KVM     Kernel Virtual Machine support is enabled.
+       LIBATA  Libata driver is enabled
+       LP      Printer support is enabled.
++      LOONGARCH LoongArch architecture is enabled.
+       LOOP    Loopback device support is enabled.
+       M68k    M68k architecture is enabled.
+                       These options have more detailed description inside of
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6874,6 +6874,12 @@
+                       When enabled, memory and cache locality will be
+                       impacted.
++      writecombine=   [LOONGARCH] Control the MAT (Memory Access Type) of
++                      ioremap_wc().
++
++                      on   - Enable writecombine, use WUC for ioremap_wc()
++                      off  - Disable writecombine, use SUC for ioremap_wc()
++
+       x2apic_phys     [X86-64,APIC] Use x2apic physical mode instead of
+                       default x2apic cluster mode on platforms
+                       supporting x2apic.
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -442,6 +442,22 @@ config ARCH_IOREMAP
+         protection support. However, you can enable LoongArch DMW-based
+         ioremap() for better performance.
++config ARCH_WRITECOMBINE
++      bool "Enable WriteCombine (WUC) for ioremap()"
++      help
++        LoongArch maintains cache coherency in hardware, but when paired
++        with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which
++        is similar to WriteCombine) is out of the scope of cache coherency
++        machanism for PCIe devices (this is a PCIe protocol violation, which
++        may be fixed in newer chipsets).
++
++        This means WUC can only used for write-only memory regions now, so
++        this option is disabled by default, making WUC silently fallback to
++        SUC for ioremap(). You can enable this option if the kernel is ensured
++        to run on hardware without this bug.
++
++        You can override this setting via writecombine=on/off boot parameter.
++
+ config ARCH_STRICT_ALIGN
+       bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
+       default y
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot
+  * @offset:    bus address of the memory
+  * @size:      size of the resource to map
+  */
++extern pgprot_t pgprot_wc;
++
+ #define ioremap_wc(offset, size)      \
+-      ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))
++      ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
+ #define ioremap_cache(offset, size)   \
+       ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -160,6 +160,27 @@ static void __init smbios_parse(void)
+       dmi_walk(find_tokens, NULL);
+ }
++#ifdef CONFIG_ARCH_WRITECOMBINE
++pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
++#else
++pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
++#endif
++
++EXPORT_SYMBOL(pgprot_wc);
++
++static int __init setup_writecombine(char *p)
++{
++      if (!strcmp(p, "on"))
++              pgprot_wc = PAGE_KERNEL_WUC;
++      else if (!strcmp(p, "off"))
++              pgprot_wc = PAGE_KERNEL_SUC;
++      else
++              pr_warn("Unknown writecombine setting \"%s\".\n", p);
++
++      return 0;
++}
++early_param("writecombine", setup_writecombine);
++
+ static int usermem __initdata;
+ static int __init early_parse_mem(char *p)
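From a consumer's point of view only the memory attribute behind the
mapping changes: ioremap_wc() now resolves through pgprot_wc, so the
same call yields WUC or SUC depending on CONFIG_ARCH_WRITECOMBINE and
the writecombine= parameter. A hedged driver-side sketch (hypothetical
device addresses, not taken from the patch):

#include <linux/io.h>
#include <linux/module.h>

#define DEMO_FB_PHYS	0x40000000UL	/* hypothetical write-only framebuffer */
#define DEMO_FB_SIZE	0x00400000UL

static void __iomem *fb;

static int __init wc_demo_init(void)
{
	/* WUC with ARCH_WRITECOMBINE=y (or writecombine=on),
	 * silently SUC otherwise -- safe either way for pure writes */
	fb = ioremap_wc(DEMO_FB_PHYS, DEMO_FB_SIZE);
	if (!fb)
		return -ENOMEM;
	memset_io(fb, 0, DEMO_FB_SIZE);
	return 0;
}

static void __exit wc_demo_exit(void)
{
	iounmap(fb);
}

module_init(wc_demo_init);
module_exit(wc_demo_exit);
MODULE_LICENSE("GPL");

At boot time, appending writecombine=on (or off) to the kernel command
line overrides the Kconfig default via the early_param() hook above.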
diff --git a/queue-6.2/mips-define-runtime_discard_exit-in-ld-script.patch b/queue-6.2/mips-define-runtime_discard_exit-in-ld-script.patch
new file mode 100644
index 0000000..779b674
--- /dev/null
@@ -0,0 +1,35 @@
+From 6dcbd0a69c84a8ae7a442840a8cf6b1379dc8f16 Mon Sep 17 00:00:00 2001
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Date: Sat, 8 Apr 2023 21:33:48 +0100
+Subject: MIPS: Define RUNTIME_DISCARD_EXIT in LD script
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+commit 6dcbd0a69c84a8ae7a442840a8cf6b1379dc8f16 upstream.
+
+MIPS's exit sections are discarded at runtime as well.
+
+Fixes link error:
+`.exit.text' referenced in section `__jump_table' of fs/fuse/inode.o:
+defined in discarded section `.exit.text' of fs/fuse/inode.o
+
+Fixes: 99cb0d917ffa ("arch: fix broken BuildID for arm64 and riscv")
+Reported-by: "kernelci.org bot" <bot@kernelci.org>
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/vmlinux.lds.S |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -15,6 +15,8 @@
+ #define EMITS_PT_NOTE
+ #endif
++#define RUNTIME_DISCARD_EXIT
++
+ #include <asm-generic/vmlinux.lds.h>
+ #undef mips
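For context, RUNTIME_DISCARD_EXIT is consumed by
include/asm-generic/vmlinux.lds.h, which (approximately, as of the v6.2
tree; abridged here) only discards the exit sections at link time when
the macro is *not* defined:

#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#else
#define EXIT_DISCARDS	EXIT_TEXT EXIT_DATA
#endif

#define DISCARDS					\
	/DISCARD/ : {					\
		EXIT_DISCARDS				\
		EXIT_CALL				\
		COMMON_DISCARDS				\
	}

With the macro defined, .exit.text survives to the final link -- so the
__jump_table reference from fs/fuse/inode.o resolves -- and the section
is freed at runtime instead, which is how MIPS actually handles it.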
diff --git a/queue-6.2/series b/queue-6.2/series
index e817cdf1a460e220e09998c68c520a95dbdc95ca..c61850c47e4bbec22a876eb366cfff5c194d66b2 100644
--- a/queue-6.2/series
+++ b/queue-6.2/series
@@ -90,3 +90,9 @@ mm-kmsan-handle-alloc-failures-in-kmsan_ioremap_page_range.patch
 mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush.patch
 mm-page_alloc-skip-regions-with-hugetlbfs-pages-when-allocating-1g-pages.patch
 mm-mmap-regression-fix-for-unmapped_area-_topdown.patch
+cifs-avoid-dup-prefix-path-in-dfs_get_automount_devname.patch
+kvm-arm64-make-vcpu-flag-updates-non-preemptible.patch
+kvm-arm64-fix-buffer-overflow-in-kvm_arm_set_fw_reg.patch
+mips-define-runtime_discard_exit-in-ld-script.patch
+loongarch-make-mstrict-align-configurable.patch
+loongarch-make-writecombine-configurable-for-ioremap.patch