git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Aug 2020 12:38:39 +0000 (14:38 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 23 Aug 2020 12:38:39 +0000 (14:38 +0200)
added patches:
can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch
can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch
drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch
drm-amd-display-fix-pow-crashing-when-given-base-0.patch
drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch
ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch
jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch
mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch
romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch
spi-prevent-adding-devices-below-an-unregistering-controller.patch
uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch

17 files changed:
queue-5.4/can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch [new file with mode: 0644]
queue-5.4/can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-fix-pow-crashing-when-given-base-0.patch [new file with mode: 0644]
queue-5.4/drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch [new file with mode: 0644]
queue-5.4/ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch [new file with mode: 0644]
queue-5.4/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch [new file with mode: 0644]
queue-5.4/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch [new file with mode: 0644]
queue-5.4/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch [new file with mode: 0644]
queue-5.4/mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch [new file with mode: 0644]
queue-5.4/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch [new file with mode: 0644]
queue-5.4/rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch [new file with mode: 0644]
queue-5.4/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch [new file with mode: 0644]
queue-5.4/scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/spi-prevent-adding-devices-below-an-unregistering-controller.patch [new file with mode: 0644]
queue-5.4/uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch [new file with mode: 0644]

diff --git a/queue-5.4/can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch b/queue-5.4/can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch
new file mode 100644 (file)
index 0000000..8a6f9fe
--- /dev/null
@@ -0,0 +1,46 @@
+From af804b7826350d5af728dca4715e473338fbd7e5 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <linux@rempel-privat.de>
+Date: Fri, 7 Aug 2020 12:51:58 +0200
+Subject: can: j1939: socket: j1939_sk_bind(): make sure ml_priv is allocated
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit af804b7826350d5af728dca4715e473338fbd7e5 upstream.
+
+This patch adds check to ensure that the struct net_device::ml_priv is
+allocated, as it is used later by the j1939 stack.
+
+The allocation is done by all mainline CAN network drivers, but when using
+bond or team devices this is not the case.
+
+Bail out if no ml_priv is allocated.
+
+Reported-by: syzbot+f03d384f3455d28833eb@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: linux-stable <stable@vger.kernel.org> # >= v5.4
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20200807105200.26441-4-o.rempel@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/can/j1939/socket.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -466,6 +466,14 @@ static int j1939_sk_bind(struct socket *
+                       goto out_release_sock;
+               }
++              if (!ndev->ml_priv) {
++                      netdev_warn_once(ndev,
++                                       "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
++                      dev_put(ndev);
++                      ret = -ENODEV;
++                      goto out_release_sock;
++              }
++
+               priv = j1939_netdev_start(ndev);
+               dev_put(ndev);
+               if (IS_ERR(priv)) {
diff --git a/queue-5.4/can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch b/queue-5.4/can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch
new file mode 100644 (file)
index 0000000..b36c51c
--- /dev/null
@@ -0,0 +1,66 @@
+From cd3b3636c99fcac52c598b64061f3fe4413c6a12 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <linux@rempel-privat.de>
+Date: Fri, 7 Aug 2020 12:51:57 +0200
+Subject: can: j1939: transport: j1939_session_tx_dat(): fix use-after-free read in j1939_tp_txtimer()
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit cd3b3636c99fcac52c598b64061f3fe4413c6a12 upstream.
+
+The current stack implementation does not support ECTS requests of not
+aligned TP sized blocks.
+
+If ECTS will request a block with size and offset spanning two TP
+blocks, this will cause memcpy() to read beyond the queued skb (which
+does only contain one TP sized block).
+
+Sometimes KASAN will detect this read if the memory region beyond the
+skb was previously allocated and freed. In other situations it will stay
+undetected. The ETP transfer in any case will be corrupted.
+
+This patch adds a sanity check to avoid this kind of read and abort the
+session with error J1939_XTP_ABORT_ECTS_TOO_BIG.
+
+Reported-by: syzbot+5322482fe520b02aea30@syzkaller.appspotmail.com
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Cc: linux-stable <stable@vger.kernel.org> # >= v5.4
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20200807105200.26441-3-o.rempel@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/can/j1939/transport.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -787,6 +787,18 @@ static int j1939_session_tx_dat(struct j
+               if (len > 7)
+                       len = 7;
++              if (offset + len > se_skb->len) {
++                      netdev_err_once(priv->ndev,
++                                      "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
++                                      __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
++                      return -EOVERFLOW;
++              }
++
++              if (!len) {
++                      ret = -ENOBUFS;
++                      break;
++              }
++
+               memcpy(&dat[1], &tpdat[offset], len);
+               ret = j1939_tp_tx_dat(session, dat, len + 1);
+               if (ret < 0) {
+@@ -1120,6 +1132,9 @@ static enum hrtimer_restart j1939_tp_txt
+                * cleanup including propagation of the error to user space.
+                */
+               break;
++      case -EOVERFLOW:
++              j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
++              break;
+       case 0:
+               session->tx_retry = 0;
+               break;
diff --git a/queue-5.4/drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch b/queue-5.4/drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch
new file mode 100644 (file)
index 0000000..c80a694
--- /dev/null
@@ -0,0 +1,40 @@
+From b24bdc37d03a0478189e20a50286092840f414fa Mon Sep 17 00:00:00 2001
+From: Stylon Wang <stylon.wang@amd.com>
+Date: Tue, 28 Jul 2020 15:10:35 +0800
+Subject: drm/amd/display: Fix EDID parsing after resume from suspend
+
+From: Stylon Wang <stylon.wang@amd.com>
+
+commit b24bdc37d03a0478189e20a50286092840f414fa upstream.
+
+[Why]
+Resuming from suspend, CEA blocks from EDID are not parsed and no video
+modes can support YUV420. When this happens, output bpc cannot go over
+8-bit with 4K modes on HDMI.
+
+[How]
+In amdgpu_dm_update_connector_after_detect(), drm_add_edid_modes() is
+called after drm_connector_update_edid_property() to fully parse EDID
+and update display info.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Stylon Wang <stylon.wang@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1434,6 +1434,7 @@ amdgpu_dm_update_connector_after_detect(
+                       drm_connector_update_edid_property(connector,
+                                                          aconnector->edid);
++                      drm_add_edid_modes(connector, aconnector->edid);
+                       if (aconnector->dc_link->aux_mode)
+                               drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
diff --git a/queue-5.4/drm-amd-display-fix-pow-crashing-when-given-base-0.patch b/queue-5.4/drm-amd-display-fix-pow-crashing-when-given-base-0.patch
new file mode 100644 (file)
index 0000000..fe58232
--- /dev/null
@@ -0,0 +1,36 @@
+From d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Thu, 6 Aug 2020 17:54:47 -0400
+Subject: drm/amd/display: fix pow() crashing when given base 0
+
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+
+commit d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c upstream.
+
+[Why&How]
+pow(a,x) is implemented as exp(x*log(a)). log(0) will crash.
+So return 0^x = 0, unless x=0, convention seems to be 0^0 = 1.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fi
+  */
+ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
++      if (arg1.value == 0)
++              return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
++
+       return dc_fixpt_exp(
+               dc_fixpt_mul(
+                       dc_fixpt_log(arg1),
diff --git a/queue-5.4/drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch b/queue-5.4/drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch
new file mode 100644 (file)
index 0000000..888c739
--- /dev/null
@@ -0,0 +1,36 @@
+From f41ed88cbd6f025f7a683a11a74f901555fba11c Mon Sep 17 00:00:00 2001
+From: Daniel Kolesa <daniel@octaforge.org>
+Date: Sat, 8 Aug 2020 22:42:35 +0200
+Subject: drm/amdgpu/display: use GFP_ATOMIC in dcn20_validate_bandwidth_internal
+
+From: Daniel Kolesa <daniel@octaforge.org>
+
+commit f41ed88cbd6f025f7a683a11a74f901555fba11c upstream.
+
+GFP_KERNEL may and will sleep, and this is being executed in
+a non-preemptible context; this will mess things up since it's
+called inbetween DC_FP_START/END, and rescheduling will result
+in the DC_FP_END later being called in a different context (or
+just crashing if any floating point/vector registers/instructions
+are used after the call is resumed in a different context).
+
+Signed-off-by: Daniel Kolesa <daniel@octaforge.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2845,7 +2845,7 @@ static bool dcn20_validate_bandwidth_int
+       int vlevel = 0;
+       int pipe_split_from[MAX_PIPES];
+       int pipe_cnt = 0;
+-      display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++      display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+       DC_LOGGER_INIT(dc->ctx->logger);
+       BW_VAL_TRACE_COUNT();
diff --git a/queue-5.4/ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch b/queue-5.4/ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch
new file mode 100644 (file)
index 0000000..189e49a
--- /dev/null
@@ -0,0 +1,56 @@
+From 7303cb5bfe845f7d43cd9b2dbd37dbb266efda9b Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 31 Jul 2020 18:21:35 +0200
+Subject: ext4: fix checking of directory entry validity for inline directories
+
+From: Jan Kara <jack@suse.cz>
+
+commit 7303cb5bfe845f7d43cd9b2dbd37dbb266efda9b upstream.
+
+ext4_search_dir() and ext4_generic_delete_entry() can be called both for
+standard directory blocks and for inline directories stored inside inode
+or inline xattr space. For the second case we didn't call
+ext4_check_dir_entry() with proper constraints that could result in
+accepting corrupted directory entry as well as false positive filesystem
+errors like:
+
+EXT4-fs error (device dm-0): ext4_search_dir:1395: inode #28320400:
+block 113246792: comm dockerd: bad entry in directory: directory entry too
+close to block end - offset=0, inode=28320403, rec_len=32, name_len=8,
+size=4096
+
+Fix the arguments passed to ext4_check_dir_entry().
+
+Fixes: 109ba779d6cc ("ext4: check for directory entries too close to block end")
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20200731162135.8080-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1392,8 +1392,8 @@ int ext4_search_dir(struct buffer_head *
+                   ext4_match(dir, fname, de)) {
+                       /* found a match - just to be sure, do
+                        * a full check */
+-                      if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+-                                               bh->b_size, offset))
++                      if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++                                               buf_size, offset))
+                               return -1;
+                       *res_dir = de;
+                       return 1;
+@@ -2462,7 +2462,7 @@ int ext4_generic_delete_entry(handle_t *
+       de = (struct ext4_dir_entry_2 *)entry_buf;
+       while (i < buf_size - csum_size) {
+               if (ext4_check_dir_entry(dir, NULL, de, bh,
+-                                       bh->b_data, bh->b_size, i))
++                                       entry_buf, buf_size, i))
+                       return -EFSCORRUPTED;
+               if (de == de_del)  {
+                       if (pde)
diff --git a/queue-5.4/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch b/queue-5.4/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
new file mode 100644 (file)
index 0000000..751cd5a
--- /dev/null
@@ -0,0 +1,39 @@
+From ef3f5830b859604eda8723c26d90ab23edc027a4 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Sat, 20 Jun 2020 14:19:48 +0800
+Subject: jbd2: add the missing unlock_buffer() in the error path of jbd2_write_superblock()
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit ef3f5830b859604eda8723c26d90ab23edc027a4 upstream.
+
+jbd2_write_superblock() is under the buffer lock of journal superblock
+before ending that superblock write, so add a missing unlock_buffer()
+in the error path before submitting buffer.
+
+Fixes: 742b06b5628f ("jbd2: check superblock mapped prior to committing")
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/r/20200620061948.2049579-1-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/journal.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1348,8 +1348,10 @@ static int jbd2_write_superblock(journal
+       int ret;
+       /* Buffer got discarded which means block device got invalidated */
+-      if (!buffer_mapped(bh))
++      if (!buffer_mapped(bh)) {
++              unlock_buffer(bh);
+               return -EIO;
++      }
+       trace_jbd2_write_superblock(journal, write_flags);
+       if (!(journal->j_flags & JBD2_BARRIER))
diff --git a/queue-5.4/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch b/queue-5.4/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
new file mode 100644 (file)
index 0000000..ab5bfb6
--- /dev/null
@@ -0,0 +1,65 @@
+From 71e843295c680898959b22dc877ae3839cc22470 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Thu, 20 Aug 2020 17:42:14 -0700
+Subject: kernel/relay.c: fix memleak on destroy relay channel
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 71e843295c680898959b22dc877ae3839cc22470 upstream.
+
+kmemleak reports a memory leak as follows:
+
+  unreferenced object 0x607ee4e5f948 (size 8):
+  comm "syz-executor.1", pid 2098, jiffies 4295031601 (age 288.468s)
+  hex dump (first 8 bytes):
+  00 00 00 00 00 00 00 00 ........
+  backtrace:
+     relay_open kernel/relay.c:583 [inline]
+     relay_open+0xb6/0x970 kernel/relay.c:563
+     do_blk_trace_setup+0x4a8/0xb20 kernel/trace/blktrace.c:557
+     __blk_trace_setup+0xb6/0x150 kernel/trace/blktrace.c:597
+     blk_trace_ioctl+0x146/0x280 kernel/trace/blktrace.c:738
+     blkdev_ioctl+0xb2/0x6a0 block/ioctl.c:613
+     block_ioctl+0xe5/0x120 fs/block_dev.c:1871
+     vfs_ioctl fs/ioctl.c:48 [inline]
+     __do_sys_ioctl fs/ioctl.c:753 [inline]
+     __se_sys_ioctl fs/ioctl.c:739 [inline]
+     __x64_sys_ioctl+0x170/0x1ce fs/ioctl.c:739
+     do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+     entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+'chan->buf' is malloced in relay_open() by alloc_percpu() but not freed
+while destroying the relay channel.  Fix it by adding free_percpu() before
+returning from relay_destroy_channel().
+
+Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Michel Lespinasse <walken@google.com>
+Cc: Daniel Axtens <dja@axtens.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Akash Goel <akash.goel@intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200817122826.48518-1-weiyongjun1@huawei.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/relay.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -197,6 +197,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+       struct rchan *chan = container_of(kref, struct rchan, kref);
++      free_percpu(chan->buf);
+       kfree(chan);
+ }
diff --git a/queue-5.4/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch b/queue-5.4/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
new file mode 100644 (file)
index 0000000..257a4da
--- /dev/null
@@ -0,0 +1,85 @@
+From e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 Mon Sep 17 00:00:00 2001
+From: Doug Berger <opendmb@gmail.com>
+Date: Thu, 20 Aug 2020 17:42:24 -0700
+Subject: mm: include CMA pages in lowmem_reserve at boot
+
+From: Doug Berger <opendmb@gmail.com>
+
+commit e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 upstream.
+
+The lowmem_reserve arrays provide a means of applying pressure against
+allocations from lower zones that were targeted at higher zones.  Its
+values are a function of the number of pages managed by higher zones and
+are assigned by a call to the setup_per_zone_lowmem_reserve() function.
+
+The function is initially called at boot time by the function
+init_per_zone_wmark_min() and may be called later by accesses of the
+/proc/sys/vm/lowmem_reserve_ratio sysctl file.
+
+The function init_per_zone_wmark_min() was moved up from a module_init to
+a core_initcall to resolve a sequencing issue with khugepaged.
+Unfortunately this created a sequencing issue with CMA page accounting.
+
+The CMA pages are added to the managed page count of a zone when
+cma_init_reserved_areas() is called at boot also as a core_initcall.  This
+makes it uncertain whether the CMA pages will be added to the managed page
+counts of their zones before or after the call to
+init_per_zone_wmark_min() as it becomes dependent on link order.  With the
+current link order the pages are added to the managed count after the
+lowmem_reserve arrays are initialized at boot.
+
+This means the lowmem_reserve values at boot may be lower than the values
+used later if /proc/sys/vm/lowmem_reserve_ratio is accessed even if the
+ratio values are unchanged.
+
+In many cases the difference is not significant, but for example
+an ARM platform with 1GB of memory and the following memory layout
+
+  cma: Reserved 256 MiB at 0x0000000030000000
+  Zone ranges:
+    DMA      [mem 0x0000000000000000-0x000000002fffffff]
+    Normal   empty
+    HighMem  [mem 0x0000000030000000-0x000000003fffffff]
+
+would result in 0 lowmem_reserve for the DMA zone.  This would allow
+userspace to deplete the DMA zone easily.
+
+Funnily enough
+
+  $ cat /proc/sys/vm/lowmem_reserve_ratio
+
+would fix up the situation because as a side effect it forces
+setup_per_zone_lowmem_reserve.
+
+This commit breaks the link order dependency by invoking
+init_per_zone_wmark_min() as a postcore_initcall so that the CMA pages
+have the chance to be properly accounted in their zone(s) and allowing
+the lowmem_reserve arrays to receive consistent values.
+
+Fixes: bc22af74f271 ("mm: update min_free_kbytes from khugepaged after core initialization")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/1597423766-27849-1-git-send-email-opendmb@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7867,7 +7867,7 @@ int __meminit init_per_zone_wmark_min(vo
+       return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/queue-5.4/mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch b/queue-5.4/mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch
new file mode 100644 (file)
index 0000000..b4e28fc
--- /dev/null
@@ -0,0 +1,52 @@
+From b7333b58f358f38d90d78e00c1ee5dec82df10ad Mon Sep 17 00:00:00 2001
+From: Yang Shi <shy828301@gmail.com>
+Date: Fri, 14 Aug 2020 21:30:41 -0700
+Subject: mm/memory.c: skip spurious TLB flush for retried page fault
+
+From: Yang Shi <shy828301@gmail.com>
+
+commit b7333b58f358f38d90d78e00c1ee5dec82df10ad upstream.
+
+Recently we found regression when running will_it_scale/page_fault3 test
+on ARM64.  Over 70% down for the multi processes cases and over 20% down
+for the multi threads cases.  It turns out the regression is caused by
+commit 89b15332af7c ("mm: drop mmap_sem before calling
+balance_dirty_pages() in write fault").
+
+The test mmaps a memory size file then write to the mapping, this would
+make all memory dirty and trigger dirty pages throttle, that upstream
+commit would release mmap_sem then retry the page fault.  The retried
+page fault would see correct PTEs installed then just fall through to
+spurious TLB flush.  The regression is caused by the excessive spurious
+TLB flush.  It is fine on x86 since x86's spurious TLB flush is no-op.
+
+We could just skip the spurious TLB flush to mitigate the regression.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Xu Yu <xuyu@linux.alibaba.com>
+Debugged-by: Xu Yu <xuyu@linux.alibaba.com>
+Tested-by: Xu Yu <xuyu@linux.alibaba.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3886,6 +3886,9 @@ static vm_fault_t handle_pte_fault(struc
+                               vmf->flags & FAULT_FLAG_WRITE)) {
+               update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
+       } else {
++              /* Skip spurious TLB flush for retried page fault */
++              if (vmf->flags & FAULT_FLAG_TRIED)
++                      goto unlock;
+               /*
+                * This is needed only for protection faults but the arch code
+                * is not yet telling us if this is a protection fault or not.
diff --git a/queue-5.4/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch b/queue-5.4/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
new file mode 100644 (file)
index 0000000..92d0e7d
--- /dev/null
@@ -0,0 +1,100 @@
+From 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd Mon Sep 17 00:00:00 2001
+From: Charan Teja Reddy <charante@codeaurora.org>
+Date: Thu, 20 Aug 2020 17:42:27 -0700
+Subject: mm, page_alloc: fix core hung in free_pcppages_bulk()
+
+From: Charan Teja Reddy <charante@codeaurora.org>
+
+commit 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd upstream.
+
+The following race is observed with the repeated online, offline and a
+delay between two successive online of memory blocks of movable zone.
+
+P1                                             P2
+
+Online the first memory block in
+the movable zone. The pcp struct
+values are initialized to default
+values,i.e., pcp->high = 0 &
+pcp->batch = 1.
+
+                                       Allocate the pages from the
+                                       movable zone.
+
+Try to Online the second memory
+block in the movable zone thus it
+entered the online_pages() but yet
+to call zone_pcp_update().
+                                       This process is entered into
+                                       the exit path thus it tries
+                                       to release the order-0 pages
+                                       to pcp lists through
+                                       free_unref_page_commit().
+                                       As pcp->high = 0, pcp->count = 1
+                                       proceed to call the function
+                                       free_pcppages_bulk().
+Update the pcp values thus the
+new pcp values are like, say,
+pcp->high = 378, pcp->batch = 63.
+                                       Read the pcp's batch value using
+                                       READ_ONCE() and pass the same to
+                                       free_pcppages_bulk(), pcp values
+                                       passed here are, batch = 63,
+                                       count = 1.
+
+                                       Since num of pages in the pcp
+                                       lists are less than ->batch,
+                                       then it will stuck in
+                                       while(list_empty(list)) loop
+                                       with interrupts disabled thus
+                                       a core hung.
+
+Avoid this by ensuring free_pcppages_bulk() is called with proper count of
+pcp list pages.
+
+The mentioned race is some what easily reproducible without [1] because
+pcp's are not updated for the first memory block online and thus there is
+a large enough race window for P2 between alloc+free and pcp struct values
+update through onlining of second memory block.
+
+With [1], the race still exists but it is very narrow as we update the pcp
+struct values for the first memory block online itself.
+
+This is not limited to the movable zone, it could also happen in cases
+with the normal zone (e.g., hotplug to a node that only has DMA memory, or
+no other memory yet).
+
+[1]: https://patchwork.kernel.org/patch/11696389/
+
+Fixes: 5f8dcc21211a ("page-allocator: split per-cpu list into one-list-per-migrate-type")
+Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Vinayak Menon <vinmenon@codeaurora.org>
+Cc: <stable@vger.kernel.org> [2.6+]
+Link: http://lkml.kernel.org/r/1597150703-19003-1-git-send-email-charante@codeaurora.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1256,6 +1256,11 @@ static void free_pcppages_bulk(struct zo
+       struct page *page, *tmp;
+       LIST_HEAD(head);
++      /*
++       * Ensure proper count is passed which otherwise would stuck in the
++       * below while (list_empty(list)) loop.
++       */
++      count = min(pcp->count, count);
+       while (count) {
+               struct list_head *list;
diff --git a/queue-5.4/rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch b/queue-5.4/rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch
new file mode 100644 (file)
index 0000000..b253f3a
--- /dev/null
@@ -0,0 +1,63 @@
+From b25e8e85e75a61af1ddc88c4798387dd3132dd43 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Tue, 11 Aug 2020 13:49:31 -0400
+Subject: RDMA/hfi1: Correct an interlock issue for TID RDMA WRITE request
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit b25e8e85e75a61af1ddc88c4798387dd3132dd43 upstream.
+
+The following message occurs when running an AI application with TID RDMA
+enabled:
+
+hfi1 0000:7f:00.0: hfi1_0: [QP74] hfi1_tid_timeout 4084
+hfi1 0000:7f:00.0: hfi1_0: [QP70] hfi1_tid_timeout 4084
+
+The issue happens when TID RDMA WRITE request is followed by an
+IB_WR_RDMA_WRITE_WITH_IMM request, the latter could be completed first on
+the responder side. As a result, no ACK packet for the latter could be
+sent because the TID RDMA WRITE request is still being processed on the
+responder side.
+
+When the TID RDMA WRITE request is eventually completed, the requester
+will wait for the IB_WR_RDMA_WRITE_WITH_IMM request to be acknowledged.
+
+If the next request is another TID RDMA WRITE request, no TID RDMA WRITE
+DATA packet could be sent because the preceding IB_WR_RDMA_WRITE_WITH_IMM
+request is not completed yet.
+
+Consequently the IB_WR_RDMA_WRITE_WITH_IMM will be retried but it will be
+ignored on the responder side because the responder thinks it has already
+been completed. Eventually the retry will be exhausted and the qp will be
+put into error state on the requester side. On the responder side, the TID
+resource timer will eventually expire because no TID RDMA WRITE DATA
+packets will be received for the second TID RDMA WRITE request.  There is
+also risk of a write-after-write memory corruption due to the issue.
+
+Fix by adding a requester side interlock to prevent any potential data
+corruption and TID RDMA protocol error.
+
+Fixes: a0b34f75ec20 ("IB/hfi1: Add interlock between a TID RDMA request and other requests")
+Link: https://lore.kernel.org/r/20200811174931.191210.84093.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org> # 5.4.x+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct
+       case IB_WR_ATOMIC_CMP_AND_SWP:
+       case IB_WR_ATOMIC_FETCH_AND_ADD:
+       case IB_WR_RDMA_WRITE:
++      case IB_WR_RDMA_WRITE_WITH_IMM:
+               switch (prev->wr.opcode) {
+               case IB_WR_TID_RDMA_WRITE:
+                       req = wqe_to_tid_req(prev);
diff --git a/queue-5.4/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch b/queue-5.4/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
new file mode 100644 (file)
index 0000000..8030d99
--- /dev/null
@@ -0,0 +1,55 @@
+From bcf85fcedfdd17911982a3e3564fcfec7b01eebd Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Thu, 20 Aug 2020 17:42:11 -0700
+Subject: romfs: fix uninitialized memory leak in romfs_dev_read()
+
+From: Jann Horn <jannh@google.com>
+
+commit bcf85fcedfdd17911982a3e3564fcfec7b01eebd upstream.
+
+romfs has a superblock field that limits the size of the filesystem; data
+beyond that limit is never accessed.
+
+romfs_dev_read() fetches a caller-supplied number of bytes from the
+backing device.  It returns 0 on success or an error code on failure;
+therefore, its API can't represent short reads, it's all-or-nothing.
+
+However, when romfs_dev_read() detects that the requested operation would
+cross the filesystem size limit, it currently silently truncates the
+requested number of bytes.  This e.g.  means that when the content of a
+file with size 0x1000 starts one byte before the filesystem size limit,
+->readpage() will only fill a single byte of the supplied page while
+leaving the rest uninitialized, leaking that uninitialized memory to
+userspace.
+
+Fix it by returning an error code instead of truncating the read when the
+requested read operation would go beyond the end of the filesystem.
+
+Fixes: da4458bda237 ("NOMMU: Make it possible for RomFS to use MTD devices directly")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: David Howells <dhowells@redhat.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200818013202.2246365-1-jannh@google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/romfs/storage.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *s
+       size_t limit;
+       limit = romfs_maxsize(sb);
+-      if (pos >= limit)
++      if (pos >= limit || buflen > limit - pos)
+               return -EIO;
+-      if (buflen > limit - pos)
+-              buflen = limit - pos;
+ #ifdef CONFIG_ROMFS_ON_MTD
+       if (sb->s_mtd)
diff --git a/queue-5.4/scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch b/queue-5.4/scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch
new file mode 100644 (file)
index 0000000..35e39c8
--- /dev/null
@@ -0,0 +1,85 @@
+From 2d9a2c5f581be3991ba67fa9e7497c711220ea8e Mon Sep 17 00:00:00 2001
+From: Steffen Maier <maier@linux.ibm.com>
+Date: Thu, 13 Aug 2020 17:28:56 +0200
+Subject: scsi: zfcp: Fix use-after-free in request timeout handlers
+
+From: Steffen Maier <maier@linux.ibm.com>
+
+commit 2d9a2c5f581be3991ba67fa9e7497c711220ea8e upstream.
+
+Before v4.15 commit 75492a51568b ("s390/scsi: Convert timers to use
+timer_setup()"), we intentionally only passed zfcp_adapter as context
+argument to zfcp_fsf_request_timeout_handler(). Since we only trigger
+adapter recovery, it was unnecessary to sync against races between timeout
+and (late) completion.  Likewise, we only passed zfcp_erp_action as context
+argument to zfcp_erp_timeout_handler(). Since we only wakeup an ERP action,
+it was unnecessary to sync against races between timeout and (late)
+completion.
+
+Meanwhile the timeout handlers get timer_list as context argument and do a
+timer-specific container-of to zfcp_fsf_req which can have been freed.
+
+Fix it by making sure that any request timeout handlers, that might just
+have started before del_timer(), are completed by using del_timer_sync()
+instead. This ensures the request free happens afterwards.
+
+Space time diagram of potential use-after-free:
+
+Basic idea is to have 2 or more pending requests whose timeouts run out at
+almost the same time.
+
+req 1 timeout     ERP thread        req 2 timeout
+----------------  ----------------  ---------------------------------------
+zfcp_fsf_request_timeout_handler
+fsf_req = from_timer(fsf_req, t, timer)
+adapter = fsf_req->adapter
+zfcp_qdio_siosl(adapter)
+zfcp_erp_adapter_reopen(adapter,...)
+                  zfcp_erp_strategy
+                  ...
+                  zfcp_fsf_req_dismiss_all
+                  list_for_each_entry_safe
+                    zfcp_fsf_req_complete 1
+                    del_timer 1
+                    zfcp_fsf_req_free 1
+                    zfcp_fsf_req_complete 2
+                                    zfcp_fsf_request_timeout_handler
+                    del_timer 2
+                                    fsf_req = from_timer(fsf_req, t, timer)
+                    zfcp_fsf_req_free 2
+                                    adapter = fsf_req->adapter
+                                              ^^^^^^^ already freed
+
+Link: https://lore.kernel.org/r/20200813152856.50088-1-maier@linux.ibm.com
+Fixes: 75492a51568b ("s390/scsi: Convert timers to use timer_setup()")
+Cc: <stable@vger.kernel.org> #4.15+
+Suggested-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: Steffen Maier <maier@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_fsf.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -409,7 +409,7 @@ static void zfcp_fsf_req_complete(struct
+               return;
+       }
+-      del_timer(&req->timer);
++      del_timer_sync(&req->timer);
+       zfcp_fsf_protstatus_eval(req);
+       zfcp_fsf_fsfstatus_eval(req);
+       req->handler(req);
+@@ -762,7 +762,7 @@ static int zfcp_fsf_req_send(struct zfcp
+       req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+       req->issued = get_tod_clock();
+       if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+-              del_timer(&req->timer);
++              del_timer_sync(&req->timer);
+               /* lookup request again, list might have changed */
+               zfcp_reqlist_find_rm(adapter->req_list, req_id);
+               zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
index dbc142a524be92a534d5f2d38146e08550f022cc..a96928d08caddef0deca5f9acdd7dc9ce21e4cf5 100644 (file)
@@ -20,3 +20,19 @@ btrfs-return-erofs-for-btrfs_fs_state_error-cases.patch
 btrfs-add-wrapper-for-transaction-abort-predicate.patch
 alsa-hda-realtek-add-quirk-for-samsung-galaxy-flex-book.patch
 alsa-hda-realtek-add-quirk-for-samsung-galaxy-book-ion.patch
+can-j1939-transport-j1939_session_tx_dat-fix-use-after-free-read-in-j1939_tp_txtimer.patch
+can-j1939-socket-j1939_sk_bind-make-sure-ml_priv-is-allocated.patch
+spi-prevent-adding-devices-below-an-unregistering-controller.patch
+romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
+kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
+uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch
+mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
+mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
+rdma-hfi1-correct-an-interlock-issue-for-tid-rdma-write-request.patch
+ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch
+jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
+scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch
+mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch
+drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch
+drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch
+drm-amd-display-fix-pow-crashing-when-given-base-0.patch
diff --git a/queue-5.4/spi-prevent-adding-devices-below-an-unregistering-controller.patch b/queue-5.4/spi-prevent-adding-devices-below-an-unregistering-controller.patch
new file mode 100644 (file)
index 0000000..de5ff62
--- /dev/null
@@ -0,0 +1,109 @@
+From ddf75be47ca748f8b12d28ac64d624354fddf189 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Mon, 3 Aug 2020 13:09:01 +0200
+Subject: spi: Prevent adding devices below an unregistering controller
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit ddf75be47ca748f8b12d28ac64d624354fddf189 upstream.
+
+CONFIG_OF_DYNAMIC and CONFIG_ACPI allow adding SPI devices at runtime
+using a DeviceTree overlay or DSDT patch.  CONFIG_SPI_SLAVE allows the
+same via sysfs.
+
+But there are no precautions to prevent adding a device below a
+controller that's being removed.  Such a device is unusable and may not
+even be able to unbind cleanly as it becomes inaccessible once the
+controller has been torn down.  E.g. it is then impossible to quiesce
+the device's interrupt.
+
+of_spi_notify() and acpi_spi_notify() do hold a ref on the controller,
+but otherwise run lockless against spi_unregister_controller().
+
+Fix by holding the spi_add_lock in spi_unregister_controller() and
+bailing out of spi_add_device() if the controller has been unregistered
+concurrently.
+
+Fixes: ce79d54ae447 ("spi/of: Add OF notifier handler")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v3.19+
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+Link: https://lore.kernel.org/r/a8c3205088a969dc8410eec1eba9aface60f36af.1596451035.git.lukas@wunner.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/Kconfig |    3 +++
+ drivers/spi/spi.c   |   21 ++++++++++++++++++++-
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -944,4 +944,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+ endif # SPI_SLAVE
++config SPI_DYNAMIC
++      def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
+  */
+ static DEFINE_MUTEX(board_lock);
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+  * spi_alloc_device - Allocate a new SPI device
+  * @ctlr: Controller to which device is connected
+@@ -553,7 +559,6 @@ static int spi_dev_check(struct device *
+  */
+ int spi_add_device(struct spi_device *spi)
+ {
+-      static DEFINE_MUTEX(spi_add_lock);
+       struct spi_controller *ctlr = spi->controller;
+       struct device *dev = ctlr->dev.parent;
+       int status;
+@@ -581,6 +586,13 @@ int spi_add_device(struct spi_device *sp
+               goto done;
+       }
++      /* Controller may unregister concurrently */
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++          !device_is_registered(&ctlr->dev)) {
++              status = -ENODEV;
++              goto done;
++      }
++
+       /* Descriptors take precedence */
+       if (ctlr->cs_gpiods)
+               spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+@@ -2582,6 +2594,10 @@ void spi_unregister_controller(struct sp
+       struct spi_controller *found;
+       int id = ctlr->bus_num;
++      /* Prevent addition of new devices, unregister existing ones */
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++              mutex_lock(&spi_add_lock);
++
+       device_for_each_child(&ctlr->dev, NULL, __unregister);
+       /* First make sure that this controller was ever added */
+@@ -2602,6 +2618,9 @@ void spi_unregister_controller(struct sp
+       if (found == ctlr)
+               idr_remove(&spi_master_idr, id);
+       mutex_unlock(&board_lock);
++
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++              mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
diff --git a/queue-5.4/uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch b/queue-5.4/uprobes-__replace_page-avoid-bug-in-munlock_vma_page.patch
new file mode 100644 (file)
index 0000000..e083607
--- /dev/null
@@ -0,0 +1,43 @@
+From c17c3dc9d08b9aad9a55a1e53f205187972f448e Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 20 Aug 2020 17:42:17 -0700
+Subject: uprobes: __replace_page() avoid BUG in munlock_vma_page()
+
+From: Hugh Dickins <hughd@google.com>
+
+commit c17c3dc9d08b9aad9a55a1e53f205187972f448e upstream.
+
+syzbot crashed on the VM_BUG_ON_PAGE(PageTail) in munlock_vma_page(), when
+called from uprobes __replace_page().  Which of many ways to fix it?
+Settled on not calling when PageCompound (since Head and Tail are equals
+in this context, PageCompound the usual check in uprobes.c, and the prior
+use of FOLL_SPLIT_PMD will have cleared PageMlocked already).
+
+Fixes: 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of FOLL_SPLIT")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Song Liu <songliubraving@fb.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>   [5.4+]
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008161338360.20413@eggly.anvils
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/uprobes.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -211,7 +211,7 @@ static int __replace_page(struct vm_area
+               try_to_free_swap(old_page);
+       page_vma_mapped_walk_done(&pvmw);
+-      if (vma->vm_flags & VM_LOCKED)
++      if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
+               munlock_vma_page(old_page);
+       put_page(old_page);