--- /dev/null
+From e2a619ca0b38f2114347b7078b8a67d72d457a3d Mon Sep 17 00:00:00 2001
+From: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Date: Fri, 22 Jul 2022 13:07:11 +0200
+Subject: asm-generic: remove a broken and needless ifdef conditional
+
+From: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+
+commit e2a619ca0b38f2114347b7078b8a67d72d457a3d upstream.
+
+Commit 527701eda5f1 ("lib: Add a generic version of devmem_is_allowed()")
+introduces the config symbol GENERIC_LIB_DEVMEM_IS_ALLOWED, but then
+falsely refers to CONFIG_GENERIC_DEVMEM_IS_ALLOWED (note the missing LIB
+in the reference) in ./include/asm-generic/io.h.
+
+Luckily, ./scripts/checkkconfigsymbols.py warns on non-existing configs:
+
+GENERIC_DEVMEM_IS_ALLOWED
+Referencing files: include/asm-generic/io.h
+
+The actual fix, though, is simply not to make this function declaration
+dependent on any kernel config. For architectures that intend to use
+the generic version, the arch's 'select GENERIC_LIB_DEVMEM_IS_ALLOWED' will
+lead to picking the function definition, and for other architectures, this
+function is simply defined elsewhere.
+
+The wrong '#ifndef' on a non-existing config symbol was also always true,
+so it had the same effect as dropping the conditional (although more by
+mistake than by intent). So, there is no functional change.
+
+Remove this broken and needless ifdef conditional.
+
+Fixes: 527701eda5f1 ("lib: Add a generic version of devmem_is_allowed()")
+Signed-off-by: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/io.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile
+ }
+ #endif
+
+-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
+ extern int devmem_is_allowed(unsigned long pfn);
+-#endif
+
+ #endif /* __KERNEL__ */
+
--- /dev/null
+From d0be8347c623e0ac4202a1d4e0373882821f56b0 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Thu, 21 Jul 2022 09:10:50 -0700
+Subject: Bluetooth: L2CAP: Fix use-after-free caused by l2cap_chan_put
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+commit d0be8347c623e0ac4202a1d4e0373882821f56b0 upstream.
+
+This fixes the following trace, which is caused by hci_rx_work starting up
+*after* the final channel reference has been put() during sock_close() but
+*before* the references to the channel have been destroyed. The code now
+relies on kref_get_unless_zero/l2cap_chan_hold_unless_zero to prevent
+referencing a channel that is about to be destroyed.
+
+ refcount_t: increment on 0; use-after-free.
+ BUG: KASAN: use-after-free in refcount_dec_and_test+0x20/0xd0
+ Read of size 4 at addr ffffffc114f5bf18 by task kworker/u17:14/705
+
+ CPU: 4 PID: 705 Comm: kworker/u17:14 Tainted: G S W
+ 4.14.234-00003-g1fb6d0bd49a4-dirty #28
+ Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150
+ Google Inc. MSM sm8150 Flame DVT (DT)
+ Workqueue: hci0 hci_rx_work
+ Call trace:
+ dump_backtrace+0x0/0x378
+ show_stack+0x20/0x2c
+ dump_stack+0x124/0x148
+ print_address_description+0x80/0x2e8
+ __kasan_report+0x168/0x188
+ kasan_report+0x10/0x18
+ __asan_load4+0x84/0x8c
+ refcount_dec_and_test+0x20/0xd0
+ l2cap_chan_put+0x48/0x12c
+ l2cap_recv_frame+0x4770/0x6550
+ l2cap_recv_acldata+0x44c/0x7a4
+ hci_acldata_packet+0x100/0x188
+ hci_rx_work+0x178/0x23c
+ process_one_work+0x35c/0x95c
+ worker_thread+0x4cc/0x960
+ kthread+0x1a8/0x1c4
+ ret_from_fork+0x10/0x18
+
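+The key primitive here is kref_get_unless_zero(), which only takes a
+reference when the count has not already dropped to zero. A rough sketch of
+how the lookup helpers use it (illustrative only; see the diff below for the
+actual l2cap_chan_hold_unless_zero() wrapper):
+
+	/* under conn->chan_lock, so the chan cannot be freed under us */
+	c = __l2cap_get_chan_by_scid(conn, cid);
+	if (c && !kref_get_unless_zero(&c->kref))
+		c = NULL;	/* final put already ran; treat as not found */
+	if (c)
+		l2cap_chan_lock(c);
+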
+Cc: stable@kernel.org
+Reported-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Tested-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/bluetooth/l2cap.h | 1
+ net/bluetooth/l2cap_core.c | 61 +++++++++++++++++++++++++++++++++---------
+ 2 files changed, 49 insertions(+), 13 deletions(-)
+
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -847,6 +847,7 @@ enum {
+ };
+
+ void l2cap_chan_hold(struct l2cap_chan *c);
++struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
+ void l2cap_chan_put(struct l2cap_chan *c);
+
+ static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -111,7 +111,8 @@ static struct l2cap_chan *__l2cap_get_ch
+ }
+
+ /* Find channel with given SCID.
+- * Returns locked channel. */
++ * Returns a reference locked channel.
++ */
+ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
+ {
+@@ -119,15 +120,19 @@ static struct l2cap_chan *l2cap_get_chan
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_scid(conn, cid);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+ }
+
+ /* Find channel with given DCID.
+- * Returns locked channel.
++ * Returns a reference locked channel.
+ */
+ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
+@@ -136,8 +141,12 @@ static struct l2cap_chan *l2cap_get_chan
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_dcid(conn, cid);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+@@ -162,8 +171,12 @@ static struct l2cap_chan *l2cap_get_chan
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_ident(conn, ident);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+@@ -497,6 +510,16 @@ void l2cap_chan_hold(struct l2cap_chan *
+ kref_get(&c->kref);
+ }
+
++struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
++{
++ BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
++
++ if (!kref_get_unless_zero(&c->kref))
++ return NULL;
++
++ return c;
++}
++
+ void l2cap_chan_put(struct l2cap_chan *c)
+ {
+ BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+@@ -1969,7 +1992,10 @@ static struct l2cap_chan *l2cap_global_c
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
+ if (src_match && dst_match) {
+- l2cap_chan_hold(c);
++ c = l2cap_chan_hold_unless_zero(c);
++ if (!c)
++ continue;
++
+ read_unlock(&chan_list_lock);
+ return c;
+ }
+@@ -1984,7 +2010,7 @@ static struct l2cap_chan *l2cap_global_c
+ }
+
+ if (c1)
+- l2cap_chan_hold(c1);
++ c1 = l2cap_chan_hold_unless_zero(c1);
+
+ read_unlock(&chan_list_lock);
+
+@@ -4464,6 +4490,7 @@ static inline int l2cap_config_req(struc
+
+ unlock:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ return err;
+ }
+
+@@ -4578,6 +4605,7 @@ static inline int l2cap_config_rsp(struc
+
+ done:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ return err;
+ }
+
+@@ -5305,6 +5333,7 @@ send_move_response:
+ l2cap_send_move_chan_rsp(chan, result);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5397,6 +5426,7 @@ static void l2cap_move_continue(struct l
+ }
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+@@ -5426,6 +5456,7 @@ static void l2cap_move_fail(struct l2cap
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+@@ -5489,6 +5520,7 @@ static int l2cap_move_channel_confirm(st
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5524,6 +5556,7 @@ static inline int l2cap_move_channel_con
+ }
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5896,12 +5929,11 @@ static inline int l2cap_le_credits(struc
+ if (credits > max_credits) {
+ BT_ERR("LE credits overflow");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+- l2cap_chan_unlock(chan);
+
+ /* Return 0 so that we don't trigger an unnecessary
+ * command reject packet.
+ */
+- return 0;
++ goto unlock;
+ }
+
+ chan->tx_credits += credits;
+@@ -5912,7 +5944,9 @@ static inline int l2cap_le_credits(struc
+ if (chan->tx_credits)
+ chan->ops->resume(chan);
+
++unlock:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -7598,6 +7632,7 @@ drop:
+
+ done:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+@@ -8086,7 +8121,7 @@ static struct l2cap_chan *l2cap_global_f
+ if (src_type != c->src_type)
+ continue;
+
+- l2cap_chan_hold(c);
++ c = l2cap_chan_hold_unless_zero(c);
+ read_unlock(&chan_list_lock);
+ return c;
+ }
--- /dev/null
+From 0c09bc33aa8e9dc867300acaadc318c2f0d85a1e Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Mon, 25 Jul 2022 16:36:29 -0700
+Subject: drm/simpledrm: Fix return type of simpledrm_simple_display_pipe_mode_valid()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 0c09bc33aa8e9dc867300acaadc318c2f0d85a1e upstream.
+
+When booting a kernel compiled with clang's CFI protection
+(CONFIG_CFI_CLANG), there is a CFI failure in
+drm_simple_kms_crtc_mode_valid() when trying to call
+simpledrm_simple_display_pipe_mode_valid() through ->mode_valid():
+
+[ 0.322802] CFI failure (target: simpledrm_simple_display_pipe_mode_valid+0x0/0x8):
+...
+[ 0.324928] Call trace:
+[ 0.324969] __ubsan_handle_cfi_check_fail+0x58/0x60
+[ 0.325053] __cfi_check_fail+0x3c/0x44
+[ 0.325120] __cfi_slowpath_diag+0x178/0x200
+[ 0.325192] drm_simple_kms_crtc_mode_valid+0x58/0x80
+[ 0.325279] __drm_helper_update_and_validate+0x31c/0x464
+...
+
+The ->mode_valid() member in 'struct drm_simple_display_pipe_funcs'
+expects a return type of 'enum drm_mode_status', not 'int'. Correct it
+to fix the CFI failure.
+
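+For context (not part of the upstream commit message): clang's CFI keys
+indirect calls on the exact function type, so even though 'int' and
+'enum drm_mode_status' are ABI-compatible, the type signatures differ and the
+call through ->mode_valid() is rejected at run time. The mismatch, reduced to
+the two declarations involved (both taken from the diff below):
+
+	/* type expected by struct drm_simple_display_pipe_funcs */
+	enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
+					   const struct drm_display_mode *mode);
+
+	/* what the driver declared before this fix */
+	static int
+	simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+						 const struct drm_display_mode *mode);
+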
+Cc: stable@vger.kernel.org
+Fixes: 11e8f5fd223b ("drm: Add simpledrm driver")
+Link: https://github.com/ClangBuiltLinux/linux/issues/1647
+Reported-by: Tomasz Paweł Gajc <tpgxyz@gmail.com>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220725233629.223223-1-nathan@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tiny/simpledrm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/tiny/simpledrm.c
++++ b/drivers/gpu/drm/tiny/simpledrm.c
+@@ -614,7 +614,7 @@ static const struct drm_connector_funcs
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+
+-static int
++static enum drm_mode_status
+ simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+ {
--- /dev/null
+From bdeb77bc2c405fa9f954c20269db175a0bd2793f Mon Sep 17 00:00:00 2001
+From: Andrei Vagin <avagin@gmail.com>
+Date: Sat, 16 Jul 2022 21:37:10 -0700
+Subject: fs: sendfile handles O_NONBLOCK of out_fd
+
+From: Andrei Vagin <avagin@gmail.com>
+
+commit bdeb77bc2c405fa9f954c20269db175a0bd2793f upstream.
+
+sendfile has to return EAGAIN if out_fd is nonblocking and the write into
+it would block.
+
+Here is a small reproducer for the problem:
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/sendfile.h>
+
+
+#define FILE_SIZE (1UL << 30)
+int main(int argc, char **argv) {
+ int p[2], fd;
+
+ if (pipe2(p, O_NONBLOCK))
+ return 1;
+
+ fd = open(argv[1], O_RDWR | O_TMPFILE, 0666);
+ if (fd < 0)
+ return 1;
+ ftruncate(fd, FILE_SIZE);
+
+ if (sendfile(p[1], fd, 0, FILE_SIZE) == -1) {
+ fprintf(stderr, "FAIL\n");
+ }
+ if (sendfile(p[1], fd, 0, FILE_SIZE) != -1 || errno != EAGAIN) {
+ fprintf(stderr, "FAIL\n");
+ }
+ return 0;
+}
+
+It worked before b964bf53e540, it is stuck after b964bf53e540, and it
+works again with this fix.
+
+This regression occurred because the old do_splice_direct() path ends up in
+pipe_write(), which handles O_NONBLOCK, while the new splice_file_to_pipe()
+path did not pass the non-blocking flag along. Here is a trace log from the
+reproducer:
+
+ 1) | __x64_sys_sendfile64() {
+ 1) | do_sendfile() {
+ 1) | __fdget()
+ 1) | rw_verify_area()
+ 1) | __fdget()
+ 1) | rw_verify_area()
+ 1) | do_splice_direct() {
+ 1) | rw_verify_area()
+ 1) | splice_direct_to_actor() {
+ 1) | do_splice_to() {
+ 1) | rw_verify_area()
+ 1) | generic_file_splice_read()
+ 1) + 74.153 us | }
+ 1) | direct_splice_actor() {
+ 1) | iter_file_splice_write() {
+ 1) | __kmalloc()
+ 1) 0.148 us | pipe_lock();
+ 1) 0.153 us | splice_from_pipe_next.part.0();
+ 1) 0.162 us | page_cache_pipe_buf_confirm();
+... 16 times
+ 1) 0.159 us | page_cache_pipe_buf_confirm();
+ 1) | vfs_iter_write() {
+ 1) | do_iter_write() {
+ 1) | rw_verify_area()
+ 1) | do_iter_readv_writev() {
+ 1) | pipe_write() {
+ 1) | mutex_lock()
+ 1) 0.153 us | mutex_unlock();
+ 1) 1.368 us | }
+ 1) 1.686 us | }
+ 1) 5.798 us | }
+ 1) 6.084 us | }
+ 1) 0.174 us | kfree();
+ 1) 0.152 us | pipe_unlock();
+ 1) + 14.461 us | }
+ 1) + 14.783 us | }
+ 1) 0.164 us | page_cache_pipe_buf_release();
+... 16 times
+ 1) 0.161 us | page_cache_pipe_buf_release();
+ 1) | touch_atime()
+ 1) + 95.854 us | }
+ 1) + 99.784 us | }
+ 1) ! 107.393 us | }
+ 1) ! 107.699 us | }
+
+Link: https://lkml.kernel.org/r/20220415005015.525191-1-avagin@gmail.com
+Fixes: b964bf53e540 ("teach sendfile(2) to handle send-to-pipe directly")
+Signed-off-by: Andrei Vagin <avagin@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/read_write.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1250,6 +1250,9 @@ static ssize_t do_sendfile(int out_fd, i
+ count, fl);
+ file_end_write(out.file);
+ } else {
++ if (out.file->f_flags & O_NONBLOCK)
++ fl |= SPLICE_F_NONBLOCK;
++
+ retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
+ }
+
--- /dev/null
+From da9a298f5fad0dc615079a340da42928bc5b138e Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Sat, 9 Jul 2022 17:26:29 +0800
+Subject: hugetlb: fix memoryleak in hugetlb_mcopy_atomic_pte
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit da9a298f5fad0dc615079a340da42928bc5b138e upstream.
+
+When alloc_huge_page() fails, *pagep is set to NULL without a prior
+put_page(), so the hugepage indicated by *pagep is leaked.
+
+Link: https://lkml.kernel.org/r/20220709092629.54291-1-linmiaohe@huawei.com
+Fixes: 8cc5fcbb5be8 ("mm, hugetlb: fix racy resv_huge_pages underflow on UFFDIO_COPY")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Acked-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5314,6 +5314,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+
+ page = alloc_huge_page(dst_vma, dst_addr, 0);
+ if (IS_ERR(page)) {
++ put_page(*pagep);
+ ret = -ENOMEM;
+ *pagep = NULL;
+ goto out;
--- /dev/null
+From 3fe2895cfecd03ac74977f32102b966b6589f481 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Tue, 5 Jul 2022 16:00:36 -0400
+Subject: mm: fix page leak with multiple threads mapping the same page
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 3fe2895cfecd03ac74977f32102b966b6589f481 upstream.
+
+We have an application with a lot of threads that use a shared mmap backed
+by tmpfs mounted with -o huge=within_size. This application started
+leaking loads of huge pages when we upgraded to a recent kernel.
+
+Using the page ref tracepoints and a BPF program written by Tejun Heo we
+were able to determine that these pages would have multiple refcounts from
+the page fault path, but when it came to unmap time we wouldn't drop the
+number of refs we had added from the faults.
+
+I wrote a reproducer that mmap'ed a file backed by tmpfs with -o
+huge=always, and then spawned 20 threads all looping faulting random
+offsets in this map, while using madvise(MADV_DONTNEED) randomly for huge
+page aligned ranges. This very quickly reproduced the problem.
+
+The problem here is that we check for the case that we have multiple
+threads faulting in a range that was previously unmapped. One thread maps
+the PMD; the other thread loses the race and then returns 0. However, at
+this point we already have the page, and we are no longer putting this
+page into the process's address space, and so we leak the page. We
+actually did the correct thing prior to f9ce0be71d1f; however, it looks
+like Kirill copied what we do in the anonymous page case. In the
+anonymous page case we don't yet have a page, so we don't have to drop a
+reference on anything. Previously we did the correct thing for file based
+faults by returning VM_FAULT_NOPAGE so we correctly drop the reference on
+the page we faulted in.
+
+Fix this by returning VM_FAULT_NOPAGE in the pmd_devmap_trans_unstable()
+case; this makes us drop the ref on the page properly, and now my
+reproducer no longer leaks the huge pages.
+
+[josef@toxicpanda.com: v2]
+ Link: https://lkml.kernel.org/r/e90c8f0dbae836632b669c2afc434006a00d4a67.1657721478.git.josef@toxicpanda.com
+Link: https://lkml.kernel.org/r/2b798acfd95c9ab9395fe85e8d5a835e2e10a920.1657051137.git.josef@toxicpanda.com
+Fixes: f9ce0be71d1f ("mm: Cleanup faultaround and finish_fault() codepaths")
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4071,9 +4071,12 @@ vm_fault_t finish_fault(struct vm_fault
+ }
+ }
+
+- /* See comment in handle_pte_fault() */
++ /*
++ * See comment in handle_pte_fault() for how this scenario happens, we
++ * need to return NOPAGE so that we drop this page.
++ */
+ if (pmd_devmap_trans_unstable(vmf->pmd))
+- return 0;
++ return VM_FAULT_NOPAGE;
+
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+ vmf->address, &vmf->ptl);
--- /dev/null
+From 66cee9097e2b74ff3c8cc040ce5717c521a0c3fa Mon Sep 17 00:00:00 2001
+From: Alistair Popple <apopple@nvidia.com>
+Date: Wed, 20 Jul 2022 16:27:45 +1000
+Subject: nouveau/svm: Fix to migrate all requested pages
+
+From: Alistair Popple <apopple@nvidia.com>
+
+commit 66cee9097e2b74ff3c8cc040ce5717c521a0c3fa upstream.
+
+Users may request that pages from an OpenCL SVM allocation be migrated
+to the GPU with clEnqueueSVMMigrateMem(). In Nouveau this will call into
+nouveau_dmem_migrate_vma() to do the migration. If the total range to be
+migrated exceeds SG_MAX_SINGLE_ALLOC the pages will be migrated in
+chunks of size SG_MAX_SINGLE_ALLOC. However, a typo in updating the
+starting address means that only the first chunk will get migrated.
+
+Fix the calculation so that the entire range will get migrated if
+possible.
+
+Signed-off-by: Alistair Popple <apopple@nvidia.com>
+Fixes: e3d8b0890469 ("drm/nouveau/svm: map pages after migration")
+Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220720062745.960701-1-apopple@nvidia.com
+Cc: <stable@vger.kernel.org> # v5.8+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_dmem.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+@@ -679,7 +679,11 @@ nouveau_dmem_migrate_vma(struct nouveau_
+ goto out_free_dma;
+
+ for (i = 0; i < npages; i += max) {
+- args.end = start + (max << PAGE_SHIFT);
++ if (args.start + (max << PAGE_SHIFT) > end)
++ args.end = end;
++ else
++ args.end = args.start + (max << PAGE_SHIFT);
++
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out_free_pfns;
--- /dev/null
+From 38c9c22a85aeed28d0831f230136e9cf6fa2ed44 Mon Sep 17 00:00:00 2001
+From: ChenXiaoSong <chenxiaosong2@huawei.com>
+Date: Thu, 7 Jul 2022 18:53:29 +0800
+Subject: ntfs: fix use-after-free in ntfs_ucsncmp()
+
+From: ChenXiaoSong <chenxiaosong2@huawei.com>
+
+commit 38c9c22a85aeed28d0831f230136e9cf6fa2ed44 upstream.
+
+Syzkaller reported use-after-free bug as follows:
+
+==================================================================
+BUG: KASAN: use-after-free in ntfs_ucsncmp+0x123/0x130
+Read of size 2 at addr ffff8880751acee8 by task a.out/879
+
+CPU: 7 PID: 879 Comm: a.out Not tainted 5.19.0-rc4-next-20220630-00001-gcc5218c8bd2c-dirty #7
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x1c0/0x2b0
+ print_address_description.constprop.0.cold+0xd4/0x484
+ print_report.cold+0x55/0x232
+ kasan_report+0xbf/0xf0
+ ntfs_ucsncmp+0x123/0x130
+ ntfs_are_names_equal.cold+0x2b/0x41
+ ntfs_attr_find+0x43b/0xb90
+ ntfs_attr_lookup+0x16d/0x1e0
+ ntfs_read_locked_attr_inode+0x4aa/0x2360
+ ntfs_attr_iget+0x1af/0x220
+ ntfs_read_locked_inode+0x246c/0x5120
+ ntfs_iget+0x132/0x180
+ load_system_files+0x1cc6/0x3480
+ ntfs_fill_super+0xa66/0x1cf0
+ mount_bdev+0x38d/0x460
+ legacy_get_tree+0x10d/0x220
+ vfs_get_tree+0x93/0x300
+ do_new_mount+0x2da/0x6d0
+ path_mount+0x496/0x19d0
+ __x64_sys_mount+0x284/0x300
+ do_syscall_64+0x3b/0xc0
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+RIP: 0033:0x7f3f2118d9ea
+Code: 48 8b 0d a9 f4 0b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 76 f4 0b 00 f7 d8 64 89 01 48
+RSP: 002b:00007ffc269deac8 EFLAGS: 00000202 ORIG_RAX: 00000000000000a5
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f3f2118d9ea
+RDX: 0000000020000000 RSI: 0000000020000100 RDI: 00007ffc269dec00
+RBP: 00007ffc269dec80 R08: 00007ffc269deb00 R09: 00007ffc269dec44
+R10: 0000000000000000 R11: 0000000000000202 R12: 000055f81ab1d220
+R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ </TASK>
+
+The buggy address belongs to the physical page:
+page:0000000085430378 refcount:1 mapcount:1 mapping:0000000000000000 index:0x555c6a81d pfn:0x751ac
+memcg:ffff888101f7e180
+anon flags: 0xfffffc00a0014(uptodate|lru|mappedtodisk|swapbacked|node=0|zone=1|lastcpupid=0x1fffff)
+raw: 000fffffc00a0014 ffffea0001bf2988 ffffea0001de2448 ffff88801712e201
+raw: 0000000555c6a81d 0000000000000000 0000000100000000 ffff888101f7e180
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8880751acd80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff8880751ace00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+>ffff8880751ace80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ^
+ ffff8880751acf00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff8880751acf80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+==================================================================
+
+The reason is that struct ATTR_RECORD->name_offset is 6485, so the end
+address of the name string is out of bounds.
+
+Fix this by adding a sanity check on the end address of the attribute name
+string.
+
+[akpm@linux-foundation.org: coding-style cleanups]
+[chenxiaosong2@huawei.com: cleanup suggested by Hawkins Jiawei]
+ Link: https://lkml.kernel.org/r/20220709064511.3304299-1-chenxiaosong2@huawei.com
+Link: https://lkml.kernel.org/r/20220707105329.4020708-1-chenxiaosong2@huawei.com
+Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
+Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
+Cc: Anton Altaparmakov <anton@tuxera.com>
+Cc: ChenXiaoSong <chenxiaosong2@huawei.com>
+Cc: Yongqiang Liu <liuyongqiang13@huawei.com>
+Cc: Zhang Yi <yi.zhang@huawei.com>
+Cc: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/attrib.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYP
+ a = (ATTR_RECORD*)((u8*)ctx->attr +
+ le32_to_cpu(ctx->attr->length));
+ for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
+- if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
+- le32_to_cpu(ctx->mrec->bytes_allocated))
++ u8 *mrec_end = (u8 *)ctx->mrec +
++ le32_to_cpu(ctx->mrec->bytes_allocated);
++ u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
++ a->name_length * sizeof(ntfschar);
++ if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
++ name_end > mrec_end)
+ break;
+ ctx->attr = a;
+ if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
--- /dev/null
+From c80af0c250c8f8a3c978aa5aafbe9c39b336b813 Mon Sep 17 00:00:00 2001
+From: Junxiao Bi <ocfs2-devel@oss.oracle.com>
+Date: Fri, 3 Jun 2022 15:28:01 -0700
+Subject: Revert "ocfs2: mount shared volume without ha stack"
+
+From: Junxiao Bi <ocfs2-devel@oss.oracle.com>
+
+commit c80af0c250c8f8a3c978aa5aafbe9c39b336b813 upstream.
+
+This reverts commit 912f655d78c5d4ad05eac287f23a435924df7144.
+
+This commit introduced a regression that can cause mounts to hang. The
+changes in __ocfs2_find_empty_slot() allow any node with a non-zero node
+number to grab the slot that was already taken by node 0, so node 1 will
+access the same journal as node 0; when it tries to grab the journal
+cluster lock, it hangs because the lock is already held by node 0. This
+is very easy to reproduce: in one cluster, mount node 0 first, then node 1,
+and you will see the following call trace on node 1.
+
+[13148.735424] INFO: task mount.ocfs2:53045 blocked for more than 122 seconds.
+[13148.739691] Not tainted 5.15.0-2148.0.4.el8uek.mountracev2.x86_64 #2
+[13148.742560] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[13148.745846] task:mount.ocfs2 state:D stack: 0 pid:53045 ppid: 53044 flags:0x00004000
+[13148.749354] Call Trace:
+[13148.750718] <TASK>
+[13148.752019] ? usleep_range+0x90/0x89
+[13148.753882] __schedule+0x210/0x567
+[13148.755684] schedule+0x44/0xa8
+[13148.757270] schedule_timeout+0x106/0x13c
+[13148.759273] ? __prepare_to_swait+0x53/0x78
+[13148.761218] __wait_for_common+0xae/0x163
+[13148.763144] __ocfs2_cluster_lock.constprop.0+0x1d6/0x870 [ocfs2]
+[13148.765780] ? ocfs2_inode_lock_full_nested+0x18d/0x398 [ocfs2]
+[13148.768312] ocfs2_inode_lock_full_nested+0x18d/0x398 [ocfs2]
+[13148.770968] ocfs2_journal_init+0x91/0x340 [ocfs2]
+[13148.773202] ocfs2_check_volume+0x39/0x461 [ocfs2]
+[13148.775401] ? iput+0x69/0xba
+[13148.777047] ocfs2_mount_volume.isra.0.cold+0x40/0x1f5 [ocfs2]
+[13148.779646] ocfs2_fill_super+0x54b/0x853 [ocfs2]
+[13148.781756] mount_bdev+0x190/0x1b7
+[13148.783443] ? ocfs2_remount+0x440/0x440 [ocfs2]
+[13148.785634] legacy_get_tree+0x27/0x48
+[13148.787466] vfs_get_tree+0x25/0xd0
+[13148.789270] do_new_mount+0x18c/0x2d9
+[13148.791046] __x64_sys_mount+0x10e/0x142
+[13148.792911] do_syscall_64+0x3b/0x89
+[13148.794667] entry_SYSCALL_64_after_hwframe+0x170/0x0
+[13148.797051] RIP: 0033:0x7f2309f6e26e
+[13148.798784] RSP: 002b:00007ffdcee7d408 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+[13148.801974] RAX: ffffffffffffffda RBX: 00007ffdcee7d4a0 RCX: 00007f2309f6e26e
+[13148.804815] RDX: 0000559aa762a8ae RSI: 0000559aa939d340 RDI: 0000559aa93a22b0
+[13148.807719] RBP: 00007ffdcee7d5b0 R08: 0000559aa93a2290 R09: 00007f230a0b4820
+[13148.810659] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffdcee7d420
+[13148.813609] R13: 0000000000000000 R14: 0000559aa939f000 R15: 0000000000000000
+[13148.816564] </TASK>
+
+To fix it, we could just fix __ocfs2_find_empty_slot(). But the original
+commit introduced the ability to mount ocfs2 locally even when it is
+cluster based, which is very dangerous: it can easily cause serious data
+corruption, since there is no way to stop other nodes from mounting the fs
+and corrupting it. Setting up ha or another cluster-aware stack is just
+the cost we have to pay to avoid corruption; otherwise we would have to do
+it in the kernel.
+
+Link: https://lkml.kernel.org/r/20220603222801.42488-1-junxiao.bi@oracle.com
+Fixes: 912f655d78c5 ("ocfs2: mount shared volume without ha stack")
+Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <heming.zhao@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/ocfs2.h | 4 +---
+ fs/ocfs2/slot_map.c | 46 +++++++++++++++++++---------------------------
+ fs/ocfs2/super.c | 21 ---------------------
+ 3 files changed, 20 insertions(+), 51 deletions(-)
+
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -277,7 +277,6 @@ enum ocfs2_mount_options
+ OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */
+ OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
+ OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
+- OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
+ };
+
+ #define OCFS2_OSB_SOFT_RO 0x0001
+@@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_glo
+
+ static inline int ocfs2_mount_local(struct ocfs2_super *osb)
+ {
+- return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)
+- || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
++ return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
+ }
+
+ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
+--- a/fs/ocfs2/slot_map.c
++++ b/fs/ocfs2/slot_map.c
+@@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struc
+ int i, ret = -ENOSPC;
+
+ if ((preferred >= 0) && (preferred < si->si_num_slots)) {
+- if (!si->si_slots[preferred].sl_valid ||
+- !si->si_slots[preferred].sl_node_num) {
++ if (!si->si_slots[preferred].sl_valid) {
+ ret = preferred;
+ goto out;
+ }
+ }
+
+ for(i = 0; i < si->si_num_slots; i++) {
+- if (!si->si_slots[i].sl_valid ||
+- !si->si_slots[i].sl_node_num) {
++ if (!si->si_slots[i].sl_valid) {
+ ret = i;
+ break;
+ }
+@@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *
+ spin_lock(&osb->osb_lock);
+ ocfs2_update_slot_info(si);
+
+- if (ocfs2_mount_local(osb))
+- /* use slot 0 directly in local mode */
+- slot = 0;
+- else {
+- /* search for ourselves first and take the slot if it already
+- * exists. Perhaps we need to mark this in a variable for our
+- * own journal recovery? Possibly not, though we certainly
+- * need to warn to the user */
+- slot = __ocfs2_node_num_to_slot(si, osb->node_num);
++ /* search for ourselves first and take the slot if it already
++ * exists. Perhaps we need to mark this in a variable for our
++ * own journal recovery? Possibly not, though we certainly
++ * need to warn to the user */
++ slot = __ocfs2_node_num_to_slot(si, osb->node_num);
++ if (slot < 0) {
++ /* if no slot yet, then just take 1st available
++ * one. */
++ slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
+ if (slot < 0) {
+- /* if no slot yet, then just take 1st available
+- * one. */
+- slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
+- if (slot < 0) {
+- spin_unlock(&osb->osb_lock);
+- mlog(ML_ERROR, "no free slots available!\n");
+- status = -EINVAL;
+- goto bail;
+- }
+- } else
+- printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
+- "already allocated to this node!\n",
+- slot, osb->dev_str);
+- }
++ spin_unlock(&osb->osb_lock);
++ mlog(ML_ERROR, "no free slots available!\n");
++ status = -EINVAL;
++ goto bail;
++ }
++ } else
++ printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
++ "allocated to this node!\n", slot, osb->dev_str);
+
+ ocfs2_set_slot(si, slot, osb->node_num);
+ osb->slot_num = slot;
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -173,7 +173,6 @@ enum {
+ Opt_dir_resv_level,
+ Opt_journal_async_commit,
+ Opt_err_cont,
+- Opt_nocluster,
+ Opt_err,
+ };
+
+@@ -207,7 +206,6 @@ static const match_table_t tokens = {
+ {Opt_dir_resv_level, "dir_resv_level=%u"},
+ {Opt_journal_async_commit, "journal_async_commit"},
+ {Opt_err_cont, "errors=continue"},
+- {Opt_nocluster, "nocluster"},
+ {Opt_err, NULL}
+ };
+
+@@ -619,13 +617,6 @@ static int ocfs2_remount(struct super_bl
+ goto out;
+ }
+
+- tmp = OCFS2_MOUNT_NOCLUSTER;
+- if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
+- ret = -EINVAL;
+- mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
+- goto out;
+- }
+-
+ tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
+ OCFS2_MOUNT_HB_NONE;
+ if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
+@@ -866,7 +857,6 @@ static int ocfs2_verify_userspace_stack(
+ }
+
+ if (ocfs2_userspace_stack(osb) &&
+- !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
+ strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
+ OCFS2_STACK_LABEL_LEN)) {
+ mlog(ML_ERROR,
+@@ -1145,11 +1135,6 @@ static int ocfs2_fill_super(struct super
+ osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
+ "ordered");
+
+- if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
+- !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
+- printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
+- "without cluster aware mode.\n", osb->dev_str);
+-
+ atomic_set(&osb->vol_state, VOLUME_MOUNTED);
+ wake_up(&osb->osb_mount_event);
+
+@@ -1456,9 +1441,6 @@ static int ocfs2_parse_options(struct su
+ case Opt_journal_async_commit:
+ mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
+ break;
+- case Opt_nocluster:
+- mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
+- break;
+ default:
+ mlog(ML_ERROR,
+ "Unrecognized mount option \"%s\" "
+@@ -1570,9 +1552,6 @@ static int ocfs2_show_options(struct seq
+ if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
+ seq_printf(s, ",journal_async_commit");
+
+- if (opts & OCFS2_MOUNT_NOCLUSTER)
+- seq_printf(s, ",nocluster");
+-
+ return 0;
+ }
+
--- /dev/null
+From 918e75f77af7d2e049bb70469ec0a2c12782d96a Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Wed, 13 Jul 2022 15:17:21 +0200
+Subject: s390/archrandom: prevent CPACF trng invocations in interrupt context
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+commit 918e75f77af7d2e049bb70469ec0a2c12782d96a upstream.
+
+This patch slightly reworks the s390 arch_get_random_seed_{int,long}
+implementation: Make sure the CPACF trng instruction is never
+called in any interrupt context. This is done by adding an
+additional in_task() condition.
+
+Justification:
+
+There are some constraints to satisfy for the invocation of the
+arch_get_random_seed_{int,long}() functions:
+- They should provide good random data during kernel initialization.
+- They should not be called in interrupt context, as the TRNG
+  instruction is relatively heavyweight and may, for example, cause
+  some network loads to time out.
+
+However, it was not clear exactly what kind of interrupt context is
+encountered during kernel init or network traffic eventually calling
+arch_get_random_seed_long().
+
+After some days of investigation it is clear that the s390
+start_kernel function does not run in any interrupt context, and
+so the trng is called:
+
+Jul 11 18:33:39 t35lp54 kernel: [<00000001064e90ca>] arch_get_random_seed_long.part.0+0x32/0x70
+Jul 11 18:33:39 t35lp54 kernel: [<000000010715f246>] random_init+0xf6/0x238
+Jul 11 18:33:39 t35lp54 kernel: [<000000010712545c>] start_kernel+0x4a4/0x628
+Jul 11 18:33:39 t35lp54 kernel: [<000000010590402a>] startup_continue+0x2a/0x40
+
+The condition in_task() is true and the CPACF trng provides random data
+during kernel startup.
+
+The network traffic, however, is more difficult. A typical call stack
+looks like this:
+
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b5600fc>] extract_entropy.constprop.0+0x23c/0x240
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b560136>] crng_reseed+0x36/0xd8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b5604b8>] crng_make_state+0x78/0x340
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b5607e0>] _get_random_bytes+0x60/0xf8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b56108a>] get_random_u32+0xda/0x248
+Jul 06 17:37:07 t35lp54 kernel: [<000000008aefe7a8>] kfence_guarded_alloc+0x48/0x4b8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008aeff35e>] __kfence_alloc+0x18e/0x1b8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008aef7f10>] __kmalloc_node_track_caller+0x368/0x4d8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b611eac>] kmalloc_reserve+0x44/0xa0
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b611f98>] __alloc_skb+0x90/0x178
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b6120dc>] __napi_alloc_skb+0x5c/0x118
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b8f06b4>] qeth_extract_skb+0x13c/0x680
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b8f6526>] qeth_poll+0x256/0x3f8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b63d76e>] __napi_poll.constprop.0+0x46/0x2f8
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b63dbec>] net_rx_action+0x1cc/0x408
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b937302>] __do_softirq+0x132/0x6b0
+Jul 06 17:37:07 t35lp54 kernel: [<000000008abf46ce>] __irq_exit_rcu+0x13e/0x170
+Jul 06 17:37:07 t35lp54 kernel: [<000000008abf531a>] irq_exit_rcu+0x22/0x50
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b922506>] do_io_irq+0xe6/0x198
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b935826>] io_int_handler+0xd6/0x110
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b9358a6>] psw_idle_exit+0x0/0xa
+Jul 06 17:37:07 t35lp54 kernel: ([<000000008ab9c59a>] arch_cpu_idle+0x52/0xe0)
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b933cfe>] default_idle_call+0x6e/0xd0
+Jul 06 17:37:07 t35lp54 kernel: [<000000008ac59f4e>] do_idle+0xf6/0x1b0
+Jul 06 17:37:07 t35lp54 kernel: [<000000008ac5a28e>] cpu_startup_entry+0x36/0x40
+Jul 06 17:37:07 t35lp54 kernel: [<000000008abb0d90>] smp_start_secondary+0x148/0x158
+Jul 06 17:37:07 t35lp54 kernel: [<000000008b935b9e>] restart_int_handler+0x6e/0x90
+
+which confirms that the call is in softirq context. So in_task() covers exactly
+the cases where we want to have CPACF trng called: not in nmi, not in hard irq,
+not in soft irq but in normal task context and during kernel init.
+
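+For reference (an approximation, not a quote of include/linux/preempt.h),
+in_task() is essentially:
+
+	#define in_task()	(!(in_nmi() | in_hardirq() | in_serving_softirq()))
+
+so it is false in the softirq path shown above and true during
+start_kernel(), which is exactly the split needed for the CPACF trng.
+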
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Acked-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220713131721.257907-1-freude@linux.ibm.com
+Fixes: e4f74400308c ("s390/archrandom: simplify back to earlier design and initialize earlier")
+[agordeev@linux.ibm.com changed desc, added Fixes and Link, removed -stable]
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/archrandom.h | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/include/asm/archrandom.h
++++ b/arch/s390/include/asm/archrandom.h
+@@ -2,7 +2,7 @@
+ /*
+ * Kernel interface for the s390 arch_random_* functions
+ *
+- * Copyright IBM Corp. 2017, 2020
++ * Copyright IBM Corp. 2017, 2022
+ *
+ * Author: Harald Freudenberger <freude@de.ibm.com>
+ *
+@@ -14,6 +14,7 @@
+ #ifdef CONFIG_ARCH_RANDOM
+
+ #include <linux/static_key.h>
++#include <linux/preempt.h>
+ #include <linux/atomic.h>
+ #include <asm/cpacf.h>
+
+@@ -32,7 +33,8 @@ static inline bool __must_check arch_get
+
+ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+ {
+- if (static_branch_likely(&s390_arch_random_available)) {
++ if (static_branch_likely(&s390_arch_random_available) &&
++ in_task()) {
+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+ atomic64_add(sizeof(*v), &s390_arch_random_counter);
+ return true;
+@@ -42,7 +44,8 @@ static inline bool __must_check arch_get
+
+ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+ {
+- if (static_branch_likely(&s390_arch_random_available)) {
++ if (static_branch_likely(&s390_arch_random_available) &&
++ in_task()) {
+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+ atomic64_add(sizeof(*v), &s390_arch_random_counter);
+ return true;
--- /dev/null
+From 84ac013046ccc438af04b7acecd4d3ab84fe4bde Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.ibm.com>
+Date: Thu, 7 Jul 2022 19:56:50 +0300
+Subject: secretmem: fix unhandled fault in truncate
+
+From: Mike Rapoport <rppt@linux.ibm.com>
+
+commit 84ac013046ccc438af04b7acecd4d3ab84fe4bde upstream.
+
+syzkaller reports the following issue:
+
+BUG: unable to handle page fault for address: ffff888021f7e005
+PGD 11401067 P4D 11401067 PUD 11402067 PMD 21f7d063 PTE 800fffffde081060
+Oops: 0002 [#1] PREEMPT SMP KASAN
+CPU: 0 PID: 3761 Comm: syz-executor281 Not tainted 5.19.0-rc4-syzkaller-00014-g941e3e791269 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:memset_erms+0x9/0x10 arch/x86/lib/memset_64.S:64
+Code: c1 e9 03 40 0f b6 f6 48 b8 01 01 01 01 01 01 01 01 48 0f af c6 f3 48 ab 89 d1 f3 aa 4c 89 c8 c3 90 49 89 f9 40 88 f0 48 89 d1 <f3> aa 4c 89 c8 c3 90 49 89 fa 40 0f b6 ce 48 b8 01 01 01 01 01 01
+RSP: 0018:ffffc9000329fa90 EFLAGS: 00010202
+RAX: 0000000000000000 RBX: 0000000000001000 RCX: 0000000000000ffb
+RDX: 0000000000000ffb RSI: 0000000000000000 RDI: ffff888021f7e005
+RBP: ffffea000087df80 R08: 0000000000000001 R09: ffff888021f7e005
+R10: ffffed10043efdff R11: 0000000000000000 R12: 0000000000000005
+R13: 0000000000000000 R14: 0000000000001000 R15: 0000000000000ffb
+FS: 00007fb29d8b2700(0000) GS:ffff8880b9a00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: ffff888021f7e005 CR3: 0000000026e7b000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ zero_user_segments include/linux/highmem.h:272 [inline]
+ folio_zero_range include/linux/highmem.h:428 [inline]
+ truncate_inode_partial_folio+0x76a/0xdf0 mm/truncate.c:237
+ truncate_inode_pages_range+0x83b/0x1530 mm/truncate.c:381
+ truncate_inode_pages mm/truncate.c:452 [inline]
+ truncate_pagecache+0x63/0x90 mm/truncate.c:753
+ simple_setattr+0xed/0x110 fs/libfs.c:535
+ secretmem_setattr+0xae/0xf0 mm/secretmem.c:170
+ notify_change+0xb8c/0x12b0 fs/attr.c:424
+ do_truncate+0x13c/0x200 fs/open.c:65
+ do_sys_ftruncate+0x536/0x730 fs/open.c:193
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+RIP: 0033:0x7fb29d900899
+Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 11 15 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fb29d8b2318 EFLAGS: 00000246 ORIG_RAX: 000000000000004d
+RAX: ffffffffffffffda RBX: 00007fb29d988408 RCX: 00007fb29d900899
+RDX: 00007fb29d900899 RSI: 0000000000000005 RDI: 0000000000000003
+RBP: 00007fb29d988400 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb29d98840c
+R13: 00007ffca01a23bf R14: 00007fb29d8b2400 R15: 0000000000022000
+ </TASK>
+Modules linked in:
+CR2: ffff888021f7e005
+---[ end trace 0000000000000000 ]---
+
+Eric Biggers suggested that this happens when
+secretmem_setattr()->simple_setattr() races with secretmem_fault() so that
+a page that is faulted in by secretmem_fault() (and thus removed from the
+direct map) is zeroed by inode truncation right afterwards.
+
+Use mapping->invalidate_lock to make secretmem_fault() and
+secretmem_setattr() mutually exclusive.
+
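+The locking follows the usual invalidate_lock pairing (shown in full in the
+diff below): the fault path takes the lock shared, truncation takes it
+exclusive:
+
+	/* secretmem_fault() */
+	filemap_invalidate_lock_shared(mapping);
+	/* ... find or install the page ... */
+	filemap_invalidate_unlock_shared(mapping);
+
+	/* secretmem_setattr() */
+	filemap_invalidate_lock(mapping);
+	ret = simple_setattr(mnt_userns, dentry, iattr);
+	filemap_invalidate_unlock(mapping);
+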
+[rppt@linux.ibm.com: v3]
+ Link: https://lkml.kernel.org/r/20220714091337.412297-1-rppt@kernel.org
+Link: https://lkml.kernel.org/r/20220707165650.248088-1-rppt@kernel.org
+Reported-by: syzbot+9bd2b7adbd34b30b87e4@syzkaller.appspotmail.com
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Suggested-by: Eric Biggers <ebiggers@kernel.org>
+Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Eric Biggers <ebiggers@kernel.org>
+Cc: Hillf Danton <hdanton@sina.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/secretmem.c | 33 ++++++++++++++++++++++++++-------
+ 1 file changed, 26 insertions(+), 7 deletions(-)
+
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -55,22 +55,28 @@ static vm_fault_t secretmem_fault(struct
+ gfp_t gfp = vmf->gfp_mask;
+ unsigned long addr;
+ struct page *page;
++ vm_fault_t ret;
+ int err;
+
+ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
+ return vmf_error(-EINVAL);
+
++ filemap_invalidate_lock_shared(mapping);
++
+ retry:
+ page = find_lock_page(mapping, offset);
+ if (!page) {
+ page = alloc_page(gfp | __GFP_ZERO);
+- if (!page)
+- return VM_FAULT_OOM;
++ if (!page) {
++ ret = VM_FAULT_OOM;
++ goto out;
++ }
+
+ err = set_direct_map_invalid_noflush(page);
+ if (err) {
+ put_page(page);
+- return vmf_error(err);
++ ret = vmf_error(err);
++ goto out;
+ }
+
+ __SetPageUptodate(page);
+@@ -86,7 +92,8 @@ retry:
+ if (err == -EEXIST)
+ goto retry;
+
+- return vmf_error(err);
++ ret = vmf_error(err);
++ goto out;
+ }
+
+ addr = (unsigned long)page_address(page);
+@@ -94,7 +101,11 @@ retry:
+ }
+
+ vmf->page = page;
+- return VM_FAULT_LOCKED;
++ ret = VM_FAULT_LOCKED;
++
++out:
++ filemap_invalidate_unlock_shared(mapping);
++ return ret;
+ }
+
+ static const struct vm_operations_struct secretmem_vm_ops = {
+@@ -162,12 +173,20 @@ static int secretmem_setattr(struct user
+ struct dentry *dentry, struct iattr *iattr)
+ {
+ struct inode *inode = d_inode(dentry);
++ struct address_space *mapping = inode->i_mapping;
+ unsigned int ia_valid = iattr->ia_valid;
++ int ret;
++
++ filemap_invalidate_lock(mapping);
+
+ if ((ia_valid & ATTR_SIZE) && inode->i_size)
+- return -EINVAL;
++ ret = -EINVAL;
++ else
++ ret = simple_setattr(mnt_userns, dentry, iattr);
++
++ filemap_invalidate_unlock(mapping);
+
+- return simple_setattr(mnt_userns, dentry, iattr);
++ return ret;
+ }
+
+ static const struct inode_operations secretmem_iops = {
--- /dev/null
+bluetooth-l2cap-fix-use-after-free-caused-by-l2cap_chan_put.patch
+revert-ocfs2-mount-shared-volume-without-ha-stack.patch
+ntfs-fix-use-after-free-in-ntfs_ucsncmp.patch
+fs-sendfile-handles-o_nonblock-of-out_fd.patch
+secretmem-fix-unhandled-fault-in-truncate.patch
+mm-fix-page-leak-with-multiple-threads-mapping-the-same-page.patch
+hugetlb-fix-memoryleak-in-hugetlb_mcopy_atomic_pte.patch
+asm-generic-remove-a-broken-and-needless-ifdef-conditional.patch
+s390-archrandom-prevent-cpacf-trng-invocations-in-interrupt-context.patch
+nouveau-svm-fix-to-migrate-all-requested-pages.patch
+drm-simpledrm-fix-return-type-of-simpledrm_simple_display_pipe_mode_valid.patch
+watch_queue-fix-missing-rcu-annotation.patch
+watch_queue-fix-missing-locking-in-add_watch_to_object.patch
+tcp-fix-data-races-around-sysctl_tcp_dsack.patch
+tcp-fix-a-data-race-around-sysctl_tcp_app_win.patch
+tcp-fix-a-data-race-around-sysctl_tcp_adv_win_scale.patch
+tcp-fix-a-data-race-around-sysctl_tcp_frto.patch
+tcp-fix-a-data-race-around-sysctl_tcp_nometrics_save.patch
+tcp-fix-data-races-around-sysctl_tcp_no_ssthresh_metrics_save.patch
--- /dev/null
+From 36eeee75ef0157e42fb6593dcc65daab289b559e Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:14 -0700
+Subject: tcp: Fix a data-race around sysctl_tcp_adv_win_scale.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit 36eeee75ef0157e42fb6593dcc65daab289b559e upstream.
+
+While reading sysctl_tcp_adv_win_scale, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
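+As a generic sketch of the idiom (some_knob below is a placeholder, not a
+real field): the lockless reader is annotated so the compiler can neither
+tear nor refetch the load, paired with the matching annotation on the
+writer side:
+
+	WRITE_ONCE(net->ipv4.some_knob, val);		/* writer */
+	val = READ_ONCE(net->ipv4.some_knob);		/* lockless reader */
+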
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1406,7 +1406,7 @@ void tcp_select_initial_window(const str
+
+ static inline int tcp_win_from_space(const struct sock *sk, int space)
+ {
+- int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
++ int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
+
+ return tcp_adv_win_scale <= 0 ?
+ (space>>(-tcp_adv_win_scale)) :
--- /dev/null
+From 02ca527ac5581cf56749db9fd03d854e842253dd Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:13 -0700
+Subject: tcp: Fix a data-race around sysctl_tcp_app_win.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit 02ca527ac5581cf56749db9fd03d854e842253dd upstream.
+
+While reading sysctl_tcp_app_win, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -526,7 +526,7 @@ static void tcp_grow_window(struct sock
+ */
+ static void tcp_init_buffer_space(struct sock *sk)
+ {
+- int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
++ int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int maxwin;
+
--- /dev/null
+From 706c6202a3589f290e1ef9be0584a8f4a3cc0507 Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:15 -0700
+Subject: tcp: Fix a data-race around sysctl_tcp_frto.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit 706c6202a3589f290e1ef9be0584a8f4a3cc0507 upstream.
+
+While reading sysctl_tcp_frto, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2167,7 +2167,7 @@ void tcp_enter_loss(struct sock *sk)
+ * loss recovery is underway except recurring timeout(s) on
+ * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+ */
+- tp->frto = net->ipv4.sysctl_tcp_frto &&
++ tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
+ (new_recovery || icsk->icsk_retransmits) &&
+ !inet_csk(sk)->icsk_mtup.probe_size;
+ }
--- /dev/null
+From 8499a2454d9e8a55ce616ede9f9580f36fd5b0f3 Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:16 -0700
+Subject: tcp: Fix a data-race around sysctl_tcp_nometrics_save.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit 8499a2454d9e8a55ce616ede9f9580f36fd5b0f3 upstream.
+
+While reading sysctl_tcp_nometrics_save, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its reader.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_metrics.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
+ int m;
+
+ sk_dst_confirm(sk);
+- if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
++ if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
+ return;
+
+ rcu_read_lock();
--- /dev/null
+From 58ebb1c8b35a8ef38cd6927431e0fa7b173a632d Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:12 -0700
+Subject: tcp: Fix data-races around sysctl_tcp_dsack.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit 58ebb1c8b35a8ef38cd6927431e0fa7b173a632d upstream.
+
+While reading sysctl_tcp_dsack, it can be changed concurrently.
+Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4419,7 +4419,7 @@ static void tcp_dsack_set(struct sock *s
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
++ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
+ int mib_idx;
+
+ if (before(seq, tp->rcv_nxt))
+@@ -4466,7 +4466,7 @@ static void tcp_send_dupack(struct sock
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
+
+- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
++ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+
+ tcp_rcv_spurious_retrans(sk, skb);
--- /dev/null
+From ab1ba21b523ab496b1a4a8e396333b24b0a18f9a Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Wed, 20 Jul 2022 09:50:17 -0700
+Subject: tcp: Fix data-races around sysctl_tcp_no_ssthresh_metrics_save.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+commit ab1ba21b523ab496b1a4a8e396333b24b0a18f9a upstream.
+
+While reading sysctl_tcp_no_ssthresh_metrics_save, it can be changed
+concurrently. Thus, we need to add READ_ONCE() to its readers.
+
+Fixes: 65e6d90168f3 ("net-tcp: Disable TCP ssthresh metrics cache by default")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_metrics.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
+
+ if (tcp_in_initial_slowstart(tp)) {
+ /* Slow start still did not finish. */
+- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
+ !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && (tcp_snd_cwnd(tp) >> 1) > val)
+@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
+ } else if (!tcp_in_slow_start(tp) &&
+ icsk->icsk_ca_state == TCP_CA_Open) {
+ /* Cong. avoidance phase, cwnd is reliable. */
+- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
+ !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
+@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ (val + tp->snd_ssthresh) >> 1);
+ }
+- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
++ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
+ !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && tp->snd_ssthresh > val)
+@@ -463,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
+ if (tcp_metric_locked(tm, TCP_METRIC_CWND))
+ tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
+
+- val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
++ val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
+ 0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val) {
+ tp->snd_ssthresh = val;
--- /dev/null
+From e64ab2dbd882933b65cd82ff6235d705ad65dbb6 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 28 Jul 2022 10:31:12 +0100
+Subject: watch_queue: Fix missing locking in add_watch_to_object()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit e64ab2dbd882933b65cd82ff6235d705ad65dbb6 upstream.
+
+If a watch is being added to a queue, it needs to guard against
+interference from addition of a new watch, manual removal of a watch and
+removal of a watch due to some other queue being destroyed.
+
+KEYCTL_WATCH_KEY guards against this for the same {key,queue} pair by
+holding the key->sem writelocked and by holding refs on both the key and
+the queue - but that doesn't prevent interaction from other {key,queue}
+pairs.
+
+While add_watch_to_object() does take the spinlock on the event queue,
+it doesn't take the lock on the source's watch list. The assumption was
+that the caller would prevent that (say by taking key->sem) - but that
+doesn't prevent interference from the destruction of another queue.
+
+Fix this by locking the watcher list in add_watch_to_object().
+
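+The resulting lock nesting in add_watch_to_object() (a condensed view of the
+diff below) is:
+
+	rcu_read_lock();
+	wqueue = rcu_access_pointer(watch->queue);
+	if (lock_wqueue(wqueue)) {
+		spin_lock(&wlist->lock);
+		ret = add_one_watch(watch, wlist, wqueue);
+		spin_unlock(&wlist->lock);
+		unlock_wqueue(wqueue);
+	}
+	rcu_read_unlock();
+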
+Fixes: c73be61cede5 ("pipe: Add general notification queue support")
+Reported-by: syzbot+03d7b43290037d1f87ca@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: keyrings@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/watch_queue.c | 58 +++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 36 insertions(+), 22 deletions(-)
+
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -457,6 +457,33 @@ void init_watch(struct watch *watch, str
+ rcu_assign_pointer(watch->queue, wqueue);
+ }
+
++static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
++{
++ const struct cred *cred;
++ struct watch *w;
++
++ hlist_for_each_entry(w, &wlist->watchers, list_node) {
++ struct watch_queue *wq = rcu_access_pointer(w->queue);
++ if (wqueue == wq && watch->id == w->id)
++ return -EBUSY;
++ }
++
++ cred = current_cred();
++ if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
++ atomic_dec(&cred->user->nr_watches);
++ return -EAGAIN;
++ }
++
++ watch->cred = get_cred(cred);
++ rcu_assign_pointer(watch->watch_list, wlist);
++
++ kref_get(&wqueue->usage);
++ kref_get(&watch->usage);
++ hlist_add_head(&watch->queue_node, &wqueue->watches);
++ hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
++ return 0;
++}
++
+ /**
+ * add_watch_to_object - Add a watch on an object to a watch list
+ * @watch: The watch to add
+@@ -471,34 +498,21 @@ void init_watch(struct watch *watch, str
+ */
+ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
+ {
+- struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
+- struct watch *w;
++ struct watch_queue *wqueue;
++ int ret = -ENOENT;
+
+- hlist_for_each_entry(w, &wlist->watchers, list_node) {
+- struct watch_queue *wq = rcu_access_pointer(w->queue);
+- if (wqueue == wq && watch->id == w->id)
+- return -EBUSY;
+- }
+-
+- watch->cred = get_current_cred();
+- rcu_assign_pointer(watch->watch_list, wlist);
+-
+- if (atomic_inc_return(&watch->cred->user->nr_watches) >
+- task_rlimit(current, RLIMIT_NOFILE)) {
+- atomic_dec(&watch->cred->user->nr_watches);
+- put_cred(watch->cred);
+- return -EAGAIN;
+- }
++ rcu_read_lock();
+
++ wqueue = rcu_access_pointer(watch->queue);
+ if (lock_wqueue(wqueue)) {
+- kref_get(&wqueue->usage);
+- kref_get(&watch->usage);
+- hlist_add_head(&watch->queue_node, &wqueue->watches);
++ spin_lock(&wlist->lock);
++ ret = add_one_watch(watch, wlist, wqueue);
++ spin_unlock(&wlist->lock);
+ unlock_wqueue(wqueue);
+ }
+
+- hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+- return 0;
++ rcu_read_unlock();
++ return ret;
+ }
+ EXPORT_SYMBOL(add_watch_to_object);
+
--- /dev/null
+From e0339f036ef4beb9b20f0b6532a1e0ece7f594c6 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Thu, 28 Jul 2022 10:31:06 +0100
+Subject: watch_queue: Fix missing rcu annotation
+
+From: David Howells <dhowells@redhat.com>
+
+commit e0339f036ef4beb9b20f0b6532a1e0ece7f594c6 upstream.
+
+Since __post_watch_notification() walks wlist->watchers with only the
+RCU read lock held, we need to use RCU methods to add to the list (we
+already use RCU methods to remove from the list).
+
+Fix add_watch_to_object() to use hlist_add_head_rcu() instead of
+hlist_add_head() for that list.
+
+Fixes: c73be61cede5 ("pipe: Add general notification queue support")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/watch_queue.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -497,7 +497,7 @@ int add_watch_to_object(struct watch *wa
+ unlock_wqueue(wqueue);
+ }
+
+- hlist_add_head(&watch->list_node, &wlist->watchers);
++ hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+ return 0;
+ }
+ EXPORT_SYMBOL(add_watch_to_object);