--- /dev/null
+++ b/queue-6.1/futex-fix-futex_waitv-hrtimer-debug-object-leak-on-kcalloc-error.patch
+From 94cd8fa09f5f1ebdd4e90964b08b7f2cc4b36c43 Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Wed, 14 Dec 2022 17:20:08 -0500
+Subject: futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit 94cd8fa09f5f1ebdd4e90964b08b7f2cc4b36c43 upstream.
+
+In a scenario where kcalloc() fails to allocate memory, the futex_waitv
+system call immediately returns -ENOMEM without invoking
+destroy_hrtimer_on_stack(). When CONFIG_DEBUG_OBJECTS_TIMERS=y, this
+results in leaking a timer debug object.
+
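+For background, this is the on-stack hrtimer pattern whose teardown the
+fix restores on the error path (a simplified sketch; the actual code
+sets the timer up via futex_setup_timer()):
+
+    hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+    /* every exit path taken after this point must also run: */
+    hrtimer_cancel(&to.timer);
+    destroy_hrtimer_on_stack(&to.timer); /* frees the timer debug object */
+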
+Fixes: bf69bad38cf6 ("futex: Implement sys_futex_waitv()")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
+Cc: stable@vger.kernel.org # v5.16+
+Link: https://lore.kernel.org/r/20221214222008.200393-1-mathieu.desnoyers@efficios.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/futex/syscalls.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct fute
+ }
+
+ futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
+- if (!futexv)
+- return -ENOMEM;
++ if (!futexv) {
++ ret = -ENOMEM;
++ goto destroy_timer;
++ }
+
+ ret = futex_parse_waitv(futexv, waiters, nr_futexes);
+ if (!ret)
+ ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
+
++ kfree(futexv);
++
++destroy_timer:
+ if (timeout) {
+ hrtimer_cancel(&to.timer);
+ destroy_hrtimer_on_stack(&to.timer);
+ }
+-
+- kfree(futexv);
+ return ret;
+ }
+
--- /dev/null
+++ b/queue-6.1/hfsplus-fix-bug-causing-custom-uid-and-gid-being-unable-to-be-assigned-with-mount.patch
+From 9f2b5debc07073e6dfdd774e3594d0224b991927 Mon Sep 17 00:00:00 2001
+From: Aditya Garg <gargaditya08@live.com>
+Date: Wed, 7 Dec 2022 03:05:40 +0000
+Subject: hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount
+
+From: Aditya Garg <gargaditya08@live.com>
+
+commit 9f2b5debc07073e6dfdd774e3594d0224b991927 upstream.
+
+Despite specifying a UID and GID in the mount command, the specified UID
+and GID were not being assigned to the files on the mounted filesystem.
+Fix this by always honouring the uid= and gid= mount options when they
+are explicitly given.
+
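+With the fix, a mount such as the following (illustrative device and
+mount point) makes files on the volume appear owned by UID/GID 1000, as
+requested:
+
+    mount -t hfsplus -o uid=1000,gid=1000 /dev/sdb1 /mnt
+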
+Link: https://lkml.kernel.org/r/C0264BF5-059C-45CF-B8DA-3A3BD2C803A2@live.com
+Signed-off-by: Aditya Garg <gargaditya08@live.com>
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/hfsplus_fs.h | 2 ++
+ fs/hfsplus/inode.c | 4 ++--
+ fs/hfsplus/options.c | 4 ++++
+ 3 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -198,6 +198,8 @@ struct hfsplus_sb_info {
+ #define HFSPLUS_SB_HFSX 3
+ #define HFSPLUS_SB_CASEFOLD 4
+ #define HFSPLUS_SB_NOBARRIER 5
++#define HFSPLUS_SB_UID 6
++#define HFSPLUS_SB_GID 7
+
+ static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+ {
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -192,11 +192,11 @@ static void hfsplus_get_perms(struct ino
+ mode = be16_to_cpu(perms->mode);
+
+ i_uid_write(inode, be32_to_cpu(perms->owner));
+- if (!i_uid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
+ inode->i_uid = sbi->uid;
+
+ i_gid_write(inode, be32_to_cpu(perms->group));
+- if (!i_gid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
+ inode->i_gid = sbi->gid;
+
+ if (dir) {
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, s
+ if (!uid_valid(sbi->uid)) {
+ pr_err("invalid uid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_UID, &sbi->flags);
+ }
+ break;
+ case opt_gid:
+@@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, s
+ if (!gid_valid(sbi->gid)) {
+ pr_err("invalid gid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_GID, &sbi->flags);
+ }
+ break;
+ case opt_part:
--- /dev/null
+++ b/queue-6.1/kmsan-export-kmsan_handle_urb.patch
+From 7ba594d700998bafa96a75360d2e060aa39156d2 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Thu, 15 Dec 2022 17:26:57 +0100
+Subject: kmsan: export kmsan_handle_urb
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 7ba594d700998bafa96a75360d2e060aa39156d2 upstream.
+
+USB support can be in a loadable module, and this causes a link failure
+with KMSAN:
+
+ERROR: modpost: "kmsan_handle_urb" [drivers/usb/core/usbcore.ko] undefined!
+
+Export the symbol so it can be used by this module.
+
+Link: https://lkml.kernel.org/r/20221215162710.3802378-1-arnd@kernel.org
+Fixes: 553a80188a5d ("kmsan: handle memory sent to/from USB")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmsan/hooks.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 35f6b6e6a908..3807502766a3 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
+ urb->transfer_buffer_length,
+ /*checked*/ false);
+ }
++EXPORT_SYMBOL_GPL(kmsan_handle_urb);
+
+ static void kmsan_handle_dma_page(const void *addr, size_t size,
+ enum dma_data_direction dir)
+--
+2.39.0
+
--- /dev/null
+++ b/queue-6.1/kmsan-include-linux-vmalloc.h.patch
+From aaa746ad8b30f38ef89a301faf339ef1c19cf33a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Thu, 15 Dec 2022 17:30:17 +0100
+Subject: kmsan: include linux/vmalloc.h
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit aaa746ad8b30f38ef89a301faf339ef1c19cf33a upstream.
+
+This is needed for the vmap/vunmap declarations:
+
+mm/kmsan/kmsan_test.c:316:9: error: implicit declaration of function 'vmap' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
+ vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ ^
+mm/kmsan/kmsan_test.c:316:29: error: use of undeclared identifier 'VM_MAP'
+ vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ ^
+mm/kmsan/kmsan_test.c:322:3: error: implicit declaration of function 'vunmap' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
+ vunmap(vbuf);
+ ^
+
+Link: https://lkml.kernel.org/r/20221215163046.4079767-1-arnd@kernel.org
+Fixes: 8ed691b02ade ("kmsan: add tests for KMSAN")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmsan/kmsan_test.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
+index eb44ef3c5f29..088e21a48dc4 100644
+--- a/mm/kmsan/kmsan_test.c
++++ b/mm/kmsan/kmsan_test.c
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
+ #include <linux/tracepoint.h>
++#include <linux/vmalloc.h>
+ #include <trace/events/printk.h>
+
+ static DEFINE_PER_CPU(int, per_cpu_var);
+--
+2.39.0
+
--- /dev/null
+++ b/queue-6.1/mm-mempolicy-fix-memory-leak-in-set_mempolicy_home_node-system-call.patch
+From 38ce7c9bdfc228c14d7621ba36d3eebedd9d4f76 Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Thu, 15 Dec 2022 14:46:21 -0500
+Subject: mm/mempolicy: fix memory leak in set_mempolicy_home_node system call
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit 38ce7c9bdfc228c14d7621ba36d3eebedd9d4f76 upstream.
+
+When encountering any vma in the range with a policy other than
+MPOL_BIND or MPOL_PREFERRED_MANY, an error is returned without issuing
+an mpol_put() on the policy just allocated with mpol_dup().
+
+This allows arbitrary users to leak kernel memory.
+
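+For illustration, a minimal reproducer sketch (hypothetical: the syscall
+number 450 and the MPOL_PREFERRED constant are assumed for x86-64 on
+v5.17+):
+
+    #define _GNU_SOURCE
+    #include <sys/mman.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    #define MPOL_PREFERRED 1 /* neither MPOL_BIND nor MPOL_PREFERRED_MANY */
+
+    int main(void)
+    {
+        size_t len = 4096;
+        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+        /* Give the vma a policy that takes the -EOPNOTSUPP path. */
+        syscall(SYS_mbind, p, len, MPOL_PREFERRED, NULL, 0UL, 0U);
+
+        /* Before this fix, every call leaked the mpol_dup()'d policy. */
+        for (int i = 0; i < 1000000; i++)
+            syscall(450 /* __NR_set_mempolicy_home_node */, p, len, 0UL, 0UL);
+        return 0;
+    }
+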
+Link: https://lkml.kernel.org/r/20221215194621.202816-1-mathieu.desnoyers@efficios.com
+Fixes: c6018b4b2549 ("mm/mempolicy: add set_mempolicy_home_node syscall")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Feng Tang <feng.tang@intel.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: <stable@vger.kernel.org> [5.17+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1540,6 +1540,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node,
+ * the home node for vmas we already updated before.
+ */
+ if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
++ mpol_put(new);
+ err = -EOPNOTSUPP;
+ break;
+ }
--- /dev/null
+++ b/queue-6.1/mm-mremap-fix-mremap-expanding-vma-with-addr-inside-vma.patch
+From 6f12be792fde994ed934168f93c2a0d2a0cf0bc5 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Fri, 16 Dec 2022 17:32:27 +0100
+Subject: mm, mremap: fix mremap() expanding vma with addr inside vma
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 6f12be792fde994ed934168f93c2a0d2a0cf0bc5 upstream.
+
+Since 6.1 we have noticed random rpm install failures that were tracked to
+mremap() returning -ENOMEM and to commit ca3d76b0aa80 ("mm: add merging
+after mremap resize").
+
+The problem occurs when mremap() expands a VMA in place using a starting
+address that is not vma->vm_start but somewhere in the middle of the
+VMA. The extension_pgoff calculation introduced by the commit is wrong
+in that case, so vma_merge() fails due to pgoffs not being compatible.
+Fix the calculation.
+
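+As a worked example (hypothetical numbers, 4K pages): take a vma
+spanning [0x1000, 0x4000) with vm_pgoff == 0, and
+mremap(addr=0x3000, old_len=0x1000, new_len=0x2000), i.e. in-place
+expansion from the middle of the vma. Then extension_start == 0x4000 and
+the correct extension_pgoff is 0 + ((0x4000 - 0x1000) >> PAGE_SHIFT) == 3,
+while the old calculation gave 0 + (0x1000 >> PAGE_SHIFT) == 1, so
+vma_merge() saw an incompatible pgoff and failed, surfacing as -ENOMEM.
+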
+Incidentally, the situations where rpm now expands a vma from the middle
+also appear to have been made possible by that commit, thanks to the
+improved vma merging. That pattern should work just fine, except for the
+buggy calculation.
+
+Link: https://lkml.kernel.org/r/20221216163227.24648-1-vbabka@suse.cz
+Reported-by: Jiri Slaby <jirislaby@kernel.org>
+ Link: https://bugzilla.suse.com/show_bug.cgi?id=1206359
+Fixes: ca3d76b0aa80 ("mm: add merging after mremap resize")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jakub Matěna <matenajakub@gmail.com>
+Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mremap.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -1016,7 +1016,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
+ long pages = (new_len - old_len) >> PAGE_SHIFT;
+ unsigned long extension_start = addr + old_len;
+ unsigned long extension_end = addr + new_len;
+- pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
++ pgoff_t extension_pgoff = vma->vm_pgoff +
++ ((extension_start - vma->vm_start) >> PAGE_SHIFT);
+
+ if (vma->vm_flags & VM_ACCOUNT) {
+ if (security_vm_enough_memory_mm(mm, pages)) {
--- /dev/null
+++ b/queue-6.1/pstore-properly-assign-mem_type-property.patch
+From beca3e311a49cd3c55a056096531737d7afa4361 Mon Sep 17 00:00:00 2001
+From: Luca Stefani <luca@osomprivacy.com>
+Date: Thu, 22 Dec 2022 14:10:49 +0100
+Subject: pstore: Properly assign mem_type property
+
+From: Luca Stefani <luca@osomprivacy.com>
+
+commit beca3e311a49cd3c55a056096531737d7afa4361 upstream.
+
+If mem-type was specified in the device tree, it ended up overriding the
+record_size field instead of populating mem_type.
+
+Because record_size is parsed after the improper assignment, with a
+default size of 0, everything continued to work as expected regardless
+of the value found in the device tree.
+
+Simply changing the target field of the struct is enough to get mem-type
+working as expected.
+
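+For reference, an illustrative ramoops device-tree node carrying the
+property (the address, sizes and value here are placeholders):
+
+    ramoops@9ff00000 {
+        compatible = "ramoops";
+        reg = <0 0x9ff00000 0 0x100000>;
+        record-size = <0x4000>;
+        console-size = <0x4000>;
+        mem-type = <1>;  /* now stored in pdata->mem_type */
+    };
+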
+Fixes: 9d843e8fafc7 ("pstore: Add mem_type property DT parsing support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Luca Stefani <luca@osomprivacy.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20221222131049.286288-1-luca@osomprivacy.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/pstore/ram.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -670,7 +670,7 @@ static int ramoops_parse_dt(struct platf
+ field = value; \
+ }
+
+- parse_u32("mem-type", pdata->record_size, pdata->mem_type);
++ parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
+ parse_u32("record-size", pdata->record_size, 0);
+ parse_u32("console-size", pdata->console_size, 0);
+ parse_u32("ftrace-size", pdata->ftrace_size, 0);
--- /dev/null
+++ b/queue-6.1/pstore-zone-use-gfp_atomic-to-allocate-zone-buffer.patch
+From 99b3b837855b987563bcfb397cf9ddd88262814b Mon Sep 17 00:00:00 2001
+From: Qiujun Huang <hqjagain@gmail.com>
+Date: Sun, 4 Sep 2022 23:17:13 +0800
+Subject: pstore/zone: Use GFP_ATOMIC to allocate zone buffer
+
+From: Qiujun Huang <hqjagain@gmail.com>
+
+commit 99b3b837855b987563bcfb397cf9ddd88262814b upstream.
+
+There is a case, found when triggering panic_on_oom, where pstore fails
+to dump kmsg because psz_kmsg_write_record() cannot get a new buffer.
+
+Handle this by using GFP_ATOMIC to allocate the buffer, which may dip
+below the low watermark into the atomic reserves.
+
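+For context, the distinction the fix relies on (general allocator
+behaviour, simplified):
+
+    kzalloc(len, GFP_KERNEL); /* may sleep to reclaim; unusable in the
+                                 atomic panic path and fails under OOM */
+    kzalloc(len, GFP_ATOMIC); /* never sleeps; may dip below the low
+                                 watermark into the atomic reserves */
+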
+Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
+Fixes: 335426c6dcdd ("pstore/zone: Provide way to skip "broken" zone for MTD devices")
+Cc: WeiXiong Liao <gmpy.liaowx@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/CAJRQjofRCF7wjrYmw3D7zd5QZnwHQq+F8U-mJDJ6NZ4bddYdLA@mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/pstore/zone.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/zone.c
++++ b/fs/pstore/zone.c
+@@ -761,7 +761,7 @@ static inline int notrace psz_kmsg_write
+ /* avoid destroying old data, allocate a new one */
+ len = zone->buffer_size + sizeof(*zone->buffer);
+ zone->oldbuf = zone->buffer;
+- zone->buffer = kzalloc(len, GFP_KERNEL);
++ zone->buffer = kzalloc(len, GFP_ATOMIC);
+ if (!zone->buffer) {
+ zone->buffer = zone->oldbuf;
+ return -ENOMEM;
--- /dev/null
+++ b/queue-6.1/rtmutex-add-acquire-semantics-for-rtmutex-lock-acquisition-slow-path.patch
+From 1c0908d8e441631f5b8ba433523cf39339ee2ba0 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Fri, 2 Dec 2022 10:02:23 +0000
+Subject: rtmutex: Add acquire semantics for rtmutex lock acquisition slow path
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 1c0908d8e441631f5b8ba433523cf39339ee2ba0 upstream.
+
+Jan Kara reported the following bug triggering on 6.0.5-rt14 running dbench
+on XFS on arm64.
+
+ kernel BUG at fs/inode.c:625!
+ Internal error: Oops - BUG: 0 [#1] PREEMPT_RT SMP
+ CPU: 11 PID: 6611 Comm: dbench Tainted: G E 6.0.0-rt14-rt+ #1
+ pc : clear_inode+0xa0/0xc0
+ lr : clear_inode+0x38/0xc0
+ Call trace:
+ clear_inode+0xa0/0xc0
+ evict+0x160/0x180
+ iput+0x154/0x240
+ do_unlinkat+0x184/0x300
+ __arm64_sys_unlinkat+0x48/0xc0
+ el0_svc_common.constprop.4+0xe4/0x2c0
+ do_el0_svc+0xac/0x100
+ el0_svc+0x78/0x200
+ el0t_64_sync_handler+0x9c/0xc0
+ el0t_64_sync+0x19c/0x1a0
+
+It also affects 6.1-rc7-rt5 and a preempt-rt fork of 5.14, so this is
+likely a bug that existed forever and only became visible when ARM
+support was added to preempt-rt. The same problem does not occur on
+x86-64. Jan also reported that converting sb->s_inode_wblist_lock to
+raw_spinlock_t makes the problem disappear, indicating that the RT
+spinlock variant is the problem.
+
+This in turn means that RT mutexes on ARM64 and any other weakly ordered
+architecture are affected by this, independent of RT.
+
+Will Deacon observed:
+
+ "I'd be more inclined to be suspicious of the slowpath tbh, as we need to
+ make sure that we have acquire semantics on all paths where the lock can
+ be taken. Looking at the rtmutex code, this really isn't obvious to me
+ -- for example, try_to_take_rt_mutex() appears to be able to return via
+ the 'takeit' label without acquire semantics and it looks like we might
+ be relying on the caller's subsequent _unlock_ of the wait_lock for
+ ordering, but that will give us release semantics which aren't correct."
+
+Sebastian Andrzej Siewior prototyped a fix based on that comment which
+does work, but it was a bit of an overkill and added some fences that
+should not be necessary.
+
+The lock owner is updated with an IRQ-safe raw spinlock held, but the
+spin_unlock does not provide acquire semantics, which are needed when
+acquiring a mutex.
+
+Add the necessary acquire semantics for lock owner updates in the slow
+path acquisition and the waiter bit logic.
+
+With the fix applied, 10 iterations of the dbench workload completed
+successfully, while the vanilla kernel failed on the first iteration.
+
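+For illustration only (simplified, not the kernel code), the ordering
+difference on the owner update:
+
+    WRITE_ONCE(lock->owner, me);    /* plain store: no ordering; on arm64
+                                       the new owner's critical-section
+                                       loads may be satisfied early */
+    xchg_acquire(&lock->owner, me); /* ACQUIRE: later loads and stores
+                                       cannot be reordered before the
+                                       owner update */
+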
+[ bigeasy@linutronix.de: Initial prototype fix ]
+
+Fixes: 700318d1d7b38 ("locking/rtmutex: Use acquire/release semantics")
+Fixes: 23f78d4a03c5 ("[PATCH] pi-futex: rt mutex core")
+Reported-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221202100223.6mevpbl7i6x5udfd@techsingularity.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/rtmutex.c | 55 +++++++++++++++++++++++++++++++++++--------
+ kernel/locking/rtmutex_api.c | 6 ++--
+ 2 files changed, 49 insertions(+), 12 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(
+ * set this bit before looking at the lock.
+ */
+
+-static __always_inline void
+-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++static __always_inline struct task_struct *
++rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
+ {
+ unsigned long val = (unsigned long)owner;
+
+ if (rt_mutex_has_waiters(lock))
+ val |= RT_MUTEX_HAS_WAITERS;
+
+- WRITE_ONCE(lock->owner, (struct task_struct *)val);
++ return (struct task_struct *)val;
++}
++
++static __always_inline void
++rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++{
++ /*
++ * lock->wait_lock is held but explicit acquire semantics are needed
++ * for a new lock owner so WRITE_ONCE is insufficient.
++ */
++ xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
++}
++
++static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
++{
++ /* lock->wait_lock is held so the unlock provides release semantics. */
++ WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
+ }
+
+ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mut
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+
+-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
++static __always_inline void
++fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
+ {
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mut
+ * still set.
+ */
+ owner = READ_ONCE(*p);
+- if (owner & RT_MUTEX_HAS_WAITERS)
+- WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++ if (owner & RT_MUTEX_HAS_WAITERS) {
++ /*
++ * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
++ * why xchg_acquire() is used for updating owner for
++ * locking and WRITE_ONCE() for unlocking.
++ *
++ * WRITE_ONCE() would work for the acquire case too, but
++ * in case that the lock acquisition failed it might
++ * force other lockers into the slow path unnecessarily.
++ */
++ if (acquire_lock)
++ xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
++ else
++ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++ }
+ }
+
+ /*
+@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mute
+ owner = *p;
+ } while (cmpxchg_relaxed(p, owner,
+ owner | RT_MUTEX_HAS_WAITERS) != owner);
++
++ /*
++ * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
++ * operations in the event of contention. Ensure the successful
++ * cmpxchg is visible.
++ */
++ smp_mb__after_atomic();
+ }
+
+ /*
+@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtryloc
+ * try_to_take_rt_mutex() sets the lock waiters bit
+ * unconditionally. Clean this up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+
+ return ret;
+ }
+@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(s
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+
+ trace_contention_end(lock, ret);
+
+@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_lock
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+ * We might have to fix that up:
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+ debug_rt_mutex_free_waiter(&waiter);
+
+ trace_contention_end(lock, 0);
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(
+ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+- rt_mutex_set_owner(lock, NULL);
++ rt_mutex_clear_owner(lock);
+ }
+
+ /**
+@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(str
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, true);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+ */
+- fixup_rt_mutex_waiters(lock);
++ fixup_rt_mutex_waiters(lock, false);
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
--- a/queue-6.1/series
+++ b/queue-6.1/series
 kprobes-kretprobe-events-missing-on-2-core-kvm-guest.patch
 hid-multitouch-fix-asus-expertbook-p2-p2451fa-trackp.patch
 hid-plantronics-additional-pids-for-double-volume-ke.patch
+futex-fix-futex_waitv-hrtimer-debug-object-leak-on-kcalloc-error.patch
+rtmutex-add-acquire-semantics-for-rtmutex-lock-acquisition-slow-path.patch
+mm-mremap-fix-mremap-expanding-vma-with-addr-inside-vma.patch
+mm-mempolicy-fix-memory-leak-in-set_mempolicy_home_node-system-call.patch
+kmsan-export-kmsan_handle_urb.patch
+kmsan-include-linux-vmalloc.h.patch
+pstore-properly-assign-mem_type-property.patch
+pstore-zone-use-gfp_atomic-to-allocate-zone-buffer.patch
+hfsplus-fix-bug-causing-custom-uid-and-gid-being-unable-to-be-assigned-with-mount.patch