From: Greg Kroah-Hartman Date: Mon, 15 Nov 2021 13:49:47 +0000 (+0100) Subject: 5.15-stable patches X-Git-Tag: v5.4.160~53 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=3f76d95524cab6c9a94e9314bc11fecc0a4050e6;p=thirdparty%2Fkernel%2Fstable-queue.git 5.15-stable patches added patches: block-hold-invalidate_lock-in-blkdiscard-ioctl.patch block-hold-invalidate_lock-in-blkresetzone-ioctl.patch block-hold-invalidate_lock-in-blkzeroout-ioctl.patch ceph-fix-mdsmap-decode-when-there-are-mds-s-beyond-max_mds.patch dmaengine-bestcomm-fix-system-boot-lockups.patch dmaengine-ti-k3-udma-set-bchan-to-null-if-a-channel-request-fail.patch dmaengine-ti-k3-udma-set-r-tchan-or-rflow-to-null-if-request-fail.patch drm-i915-guc-fix-blocked-context-accounting.patch erofs-fix-unsafe-pagevec-reuse-of-hooked-pclusters.patch f2fs-fix-uaf-in-f2fs_available_free_memory.patch f2fs-include-non-compressed-blocks-in-compr_written_block.patch f2fs-should-use-gfp_nofs-for-directory-inodes.patch irqchip-sifive-plic-fixup-eoi-failed-when-masked.patch ksmbd-don-t-need-8byte-alignment-for-request-length-in-ksmbd_check_message.patch ksmbd-fix-buffer-length-check-in-fsctl_validate_negotiate_info.patch posix-cpu-timers-clear-task-posix_cputimers_work-in-copy_process.patch --- diff --git a/queue-5.15/block-hold-invalidate_lock-in-blkdiscard-ioctl.patch b/queue-5.15/block-hold-invalidate_lock-in-blkdiscard-ioctl.patch new file mode 100644 index 00000000000..8979e9dc4b1 --- /dev/null +++ b/queue-5.15/block-hold-invalidate_lock-in-blkdiscard-ioctl.patch @@ -0,0 +1,59 @@ +From 7607c44c157d343223510c8ffdf7206fdd2a6213 Mon Sep 17 00:00:00 2001 +From: Shin'ichiro Kawasaki +Date: Tue, 9 Nov 2021 19:47:22 +0900 +Subject: block: Hold invalidate_lock in BLKDISCARD ioctl + +From: Shin'ichiro Kawasaki + +commit 7607c44c157d343223510c8ffdf7206fdd2a6213 upstream. + +When BLKDISCARD ioctl and data read race, the data read leaves stale +page cache. To avoid the stale page cache, hold invalidate_lock of the +block device file mapping. The stale page cache is observed when +blktests test case block/009 is repeated hundreds of times. + +This patch can be applied back to the stable kernel version v5.15.y +with slight patch edit. Rework is required for older stable kernels. 
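+
+As a minimal sketch of the serialization added below (illustrative
+only, with the error-path goto folded away):
+
+  struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+  filemap_invalidate_lock(mapping);
+  err = truncate_bdev_range(bdev, mode, start, start + len - 1);
+  if (!err)
+          err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+                                     GFP_KERNEL, flags);
+  filemap_invalidate_unlock(mapping);
+
+Concurrent readers now either complete before the truncate or
+repopulate the page cache only after the discard has finished.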
+ +Fixes: 351499a172c0 ("block: Invalidate cache on discard v2") +Signed-off-by: Shin'ichiro Kawasaki +Cc: stable@vger.kernel.org # v5.15 +Reviewed-by: Jan Kara +Link: https://lore.kernel.org/r/20211109104723.835533-2-shinichiro.kawasaki@wdc.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + block/ioctl.c | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +--- a/block/ioctl.c ++++ b/block/ioctl.c +@@ -113,6 +113,7 @@ static int blk_ioctl_discard(struct bloc + uint64_t range[2]; + uint64_t start, len; + struct request_queue *q = bdev_get_queue(bdev); ++ struct inode *inode = bdev->bd_inode; + int err; + + if (!(mode & FMODE_WRITE)) +@@ -135,12 +136,17 @@ static int blk_ioctl_discard(struct bloc + if (start + len > i_size_read(bdev->bd_inode)) + return -EINVAL; + ++ filemap_invalidate_lock(inode->i_mapping); + err = truncate_bdev_range(bdev, mode, start, start + len - 1); + if (err) +- return err; ++ goto fail; + +- return blkdev_issue_discard(bdev, start >> 9, len >> 9, +- GFP_KERNEL, flags); ++ err = blkdev_issue_discard(bdev, start >> 9, len >> 9, ++ GFP_KERNEL, flags); ++ ++fail: ++ filemap_invalidate_unlock(inode->i_mapping); ++ return err; + } + + static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, diff --git a/queue-5.15/block-hold-invalidate_lock-in-blkresetzone-ioctl.patch b/queue-5.15/block-hold-invalidate_lock-in-blkresetzone-ioctl.patch new file mode 100644 index 00000000000..61a75b986dd --- /dev/null +++ b/queue-5.15/block-hold-invalidate_lock-in-blkresetzone-ioctl.patch @@ -0,0 +1,61 @@ +From 86399ea071099ec8ee0a83ac9ad67f7df96a50ad Mon Sep 17 00:00:00 2001 +From: Shin'ichiro Kawasaki +Date: Thu, 11 Nov 2021 17:52:38 +0900 +Subject: block: Hold invalidate_lock in BLKRESETZONE ioctl + +From: Shin'ichiro Kawasaki + +commit 86399ea071099ec8ee0a83ac9ad67f7df96a50ad upstream. + +When BLKRESETZONE ioctl and data read race, the data read leaves stale +page cache. The commit e5113505904e ("block: Discard page cache of zone +reset target range") added page cache truncation to avoid stale page +cache after the ioctl. However, the stale page cache still can be read +during the reset zone operation for the ioctl. To avoid the stale page +cache completely, hold invalidate_lock of the block device file mapping. + +Fixes: e5113505904e ("block: Discard page cache of zone reset target range") +Signed-off-by: Shin'ichiro Kawasaki +Cc: stable@vger.kernel.org # v5.15 +Reviewed-by: Jan Kara +Reviewed-by: Ming Lei +Link: https://lore.kernel.org/r/20211111085238.942492-1-shinichiro.kawasaki@wdc.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + block/blk-zoned.c | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +--- a/block/blk-zoned.c ++++ b/block/blk-zoned.c +@@ -429,9 +429,10 @@ int blkdev_zone_mgmt_ioctl(struct block_ + op = REQ_OP_ZONE_RESET; + + /* Invalidate the page cache, including dirty pages. */ ++ filemap_invalidate_lock(bdev->bd_inode->i_mapping); + ret = blkdev_truncate_zone_range(bdev, mode, &zrange); + if (ret) +- return ret; ++ goto fail; + break; + case BLKOPENZONE: + op = REQ_OP_ZONE_OPEN; +@@ -449,15 +450,9 @@ int blkdev_zone_mgmt_ioctl(struct block_ + ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors, + GFP_KERNEL); + +- /* +- * Invalidate the page cache again for zone reset: writes can only be +- * direct for zoned devices so concurrent writes would not add any page +- * to the page cache after/during reset. 
The page cache may be filled +- * again due to concurrent reads though and dropping the pages for +- * these is fine. +- */ +- if (!ret && cmd == BLKRESETZONE) +- ret = blkdev_truncate_zone_range(bdev, mode, &zrange); ++fail: ++ if (cmd == BLKRESETZONE) ++ filemap_invalidate_unlock(bdev->bd_inode->i_mapping); + + return ret; + } diff --git a/queue-5.15/block-hold-invalidate_lock-in-blkzeroout-ioctl.patch b/queue-5.15/block-hold-invalidate_lock-in-blkzeroout-ioctl.patch new file mode 100644 index 00000000000..10859e573a7 --- /dev/null +++ b/queue-5.15/block-hold-invalidate_lock-in-blkzeroout-ioctl.patch @@ -0,0 +1,60 @@ +From 35e4c6c1a2fc2eb11b9306e95cda1fa06a511948 Mon Sep 17 00:00:00 2001 +From: Shin'ichiro Kawasaki +Date: Tue, 9 Nov 2021 19:47:23 +0900 +Subject: block: Hold invalidate_lock in BLKZEROOUT ioctl + +From: Shin'ichiro Kawasaki + +commit 35e4c6c1a2fc2eb11b9306e95cda1fa06a511948 upstream. + +When BLKZEROOUT ioctl and data read race, the data read leaves stale +page cache. To avoid the stale page cache, hold invalidate_lock of the +block device file mapping. The stale page cache is observed when +blktests test case block/009 is modified to call "blkdiscard -z" command +and repeated hundreds of times. + +This patch can be applied back to the stable kernel version v5.15.y. +Rework is required for older stable kernels. + +Fixes: 22dd6d356628 ("block: invalidate the page cache when issuing BLKZEROOUT") +Signed-off-by: Shin'ichiro Kawasaki +Cc: stable@vger.kernel.org # v5.15 +Reviewed-by: Jan Kara +Link: https://lore.kernel.org/r/20211109104723.835533-3-shinichiro.kawasaki@wdc.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + block/ioctl.c | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +--- a/block/ioctl.c ++++ b/block/ioctl.c +@@ -154,6 +154,7 @@ static int blk_ioctl_zeroout(struct bloc + { + uint64_t range[2]; + uint64_t start, end, len; ++ struct inode *inode = bdev->bd_inode; + int err; + + if (!(mode & FMODE_WRITE)) +@@ -176,12 +177,17 @@ static int blk_ioctl_zeroout(struct bloc + return -EINVAL; + + /* Invalidate the page cache, including dirty pages */ ++ filemap_invalidate_lock(inode->i_mapping); + err = truncate_bdev_range(bdev, mode, start, end); + if (err) +- return err; ++ goto fail; + +- return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, +- BLKDEV_ZERO_NOUNMAP); ++ err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, ++ BLKDEV_ZERO_NOUNMAP); ++ ++fail: ++ filemap_invalidate_unlock(inode->i_mapping); ++ return err; + } + + static int put_ushort(unsigned short __user *argp, unsigned short val) diff --git a/queue-5.15/ceph-fix-mdsmap-decode-when-there-are-mds-s-beyond-max_mds.patch b/queue-5.15/ceph-fix-mdsmap-decode-when-there-are-mds-s-beyond-max_mds.patch new file mode 100644 index 00000000000..74010edfb6e --- /dev/null +++ b/queue-5.15/ceph-fix-mdsmap-decode-when-there-are-mds-s-beyond-max_mds.patch @@ -0,0 +1,40 @@ +From 0e24421ac431e7af62d4acef6c638b85aae51728 Mon Sep 17 00:00:00 2001 +From: Xiubo Li +Date: Fri, 5 Nov 2021 17:34:18 +0800 +Subject: ceph: fix mdsmap decode when there are MDS's beyond max_mds + +From: Xiubo Li + +commit 0e24421ac431e7af62d4acef6c638b85aae51728 upstream. + +If the max_mds is decreased in a cephfs cluster, there is a window +of time before the MDSs are removed. If a map goes out during this +period, the mdsmap may show the decreased max_mds but still shows +those MDSes as in or in the export target list. + +Ensure that we don't fail the map decode in that case. 
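+
+In outline (simplified, not the verbatim decoder), the export-target
+loop now tolerates ranks at or beyond possible_max_rank:
+
+  for (j = 0; j < num_export_targets; j++) {
+          target = ceph_decode_32(&pexport_targets);
+          /* may exceed possible_max_rank during the shrink window;
+             record it instead of failing the whole decode with -EIO */
+          info->export_targets[j] = target;
+  }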
+ +Cc: stable@vger.kernel.org +URL: https://tracker.ceph.com/issues/52436 +Fixes: d517b3983dd3 ("ceph: reconnect to the export targets on new mdsmaps") +Signed-off-by: Xiubo Li +Reviewed-by: Jeff Layton +Signed-off-by: Ilya Dryomov +Signed-off-by: Greg Kroah-Hartman +--- + fs/ceph/mdsmap.c | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/fs/ceph/mdsmap.c ++++ b/fs/ceph/mdsmap.c +@@ -263,10 +263,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(v + goto nomem; + for (j = 0; j < num_export_targets; j++) { + target = ceph_decode_32(&pexport_targets); +- if (target >= m->possible_max_rank) { +- err = -EIO; +- goto corrupt; +- } + info->export_targets[j] = target; + } + } else { diff --git a/queue-5.15/dmaengine-bestcomm-fix-system-boot-lockups.patch b/queue-5.15/dmaengine-bestcomm-fix-system-boot-lockups.patch new file mode 100644 index 00000000000..6a5bac33510 --- /dev/null +++ b/queue-5.15/dmaengine-bestcomm-fix-system-boot-lockups.patch @@ -0,0 +1,130 @@ +From adec566b05288f2787a1f88dbaf77ed8b0c644fa Mon Sep 17 00:00:00 2001 +From: Anatolij Gustschin +Date: Thu, 14 Oct 2021 11:40:12 +0200 +Subject: dmaengine: bestcomm: fix system boot lockups + +From: Anatolij Gustschin + +commit adec566b05288f2787a1f88dbaf77ed8b0c644fa upstream. + +memset() and memcpy() on an MMIO region like here results in a +lockup at startup on mpc5200 platform (since this first happens +during probing of the ATA and Ethernet drivers). Use memset_io() +and memcpy_toio() instead. + +Fixes: 2f9ea1bde0d1 ("bestcomm: core bestcomm support for Freescale MPC5200") +Cc: stable@vger.kernel.org # v5.14+ +Signed-off-by: Anatolij Gustschin +Link: https://lore.kernel.org/r/20211014094012.21286-1-agust@denx.de +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman +--- + drivers/dma/bestcomm/ata.c | 2 +- + drivers/dma/bestcomm/bestcomm.c | 22 +++++++++++----------- + drivers/dma/bestcomm/fec.c | 4 ++-- + drivers/dma/bestcomm/gen_bd.c | 4 ++-- + 4 files changed, 16 insertions(+), 16 deletions(-) + +--- a/drivers/dma/bestcomm/ata.c ++++ b/drivers/dma/bestcomm/ata.c +@@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task + struct bcom_ata_var *var; + + /* Reset all BD */ +- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); ++ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + tsk->index = 0; + tsk->outdex = 0; +--- a/drivers/dma/bestcomm/bestcomm.c ++++ b/drivers/dma/bestcomm/bestcomm.c +@@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_siz + tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); + if (!tsk->bd) + goto error; +- memset(tsk->bd, 0x00, bd_count * bd_size); ++ memset_io(tsk->bd, 0x00, bd_count * bd_size); + + tsk->num_bd = bd_count; + tsk->bd_size = bd_size; +@@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_imag + inc = bcom_task_inc(task); + + /* Clear & copy */ +- memset(var, 0x00, BCOM_VAR_SIZE); +- memset(inc, 0x00, BCOM_INC_SIZE); ++ memset_io(var, 0x00, BCOM_VAR_SIZE); ++ memset_io(inc, 0x00, BCOM_INC_SIZE); + + desc_src = (u32 *)(hdr + 1); + var_src = desc_src + hdr->desc_size; + inc_src = var_src + hdr->var_size; + +- memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); +- memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); +- memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); ++ memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); ++ memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); ++ memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); + + return 0; + } +@@ -302,13 +302,13 @@ static int bcom_engine_init(void) + return 
-ENOMEM;
+ }
+
+- memset(bcom_eng->tdt, 0x00, tdt_size);
+- memset(bcom_eng->ctx, 0x00, ctx_size);
+- memset(bcom_eng->var, 0x00, var_size);
+- memset(bcom_eng->fdt, 0x00, fdt_size);
++ memset_io(bcom_eng->tdt, 0x00, tdt_size);
++ memset_io(bcom_eng->ctx, 0x00, ctx_size);
++ memset_io(bcom_eng->var, 0x00, var_size);
++ memset_io(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+- memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
++ memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+--- a/drivers/dma/bestcomm/fec.c
++++ b/drivers/dma/bestcomm/fec.c
+@@ -140,7 +140,7 @@ bcom_fec_rx_reset(struct bcom_task *tsk)
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
++ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+@@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk)
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
++ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+--- a/drivers/dma/bestcomm/gen_bd.c
++++ b/drivers/dma/bestcomm/gen_bd.c
+@@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *t
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
++ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+@@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *t
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
++ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
diff --git a/queue-5.15/dmaengine-ti-k3-udma-set-bchan-to-null-if-a-channel-request-fail.patch b/queue-5.15/dmaengine-ti-k3-udma-set-bchan-to-null-if-a-channel-request-fail.patch
new file mode 100644
index 00000000000..667075384b0
--- /dev/null
+++ b/queue-5.15/dmaengine-ti-k3-udma-set-bchan-to-null-if-a-channel-request-fail.patch
@@ -0,0 +1,51 @@
+From 5c6c6d60e4b489308ae4da8424c869f7cc53cd12 Mon Sep 17 00:00:00 2001
+From: Kishon Vijay Abraham I
+Date: Sun, 31 Oct 2021 08:54:10 +0530
+Subject: dmaengine: ti: k3-udma: Set bchan to NULL if a channel request fail
+
+From: Kishon Vijay Abraham I
+
+commit 5c6c6d60e4b489308ae4da8424c869f7cc53cd12 upstream.
+
+bcdma_get_*() checks if bchan is already allocated by checking if it
+has a NON NULL value. For the error cases, bchan will have error value
+and bcdma_get_*() considers this as already allocated (PASS) since the
+error values are NON NULL. This results in NULL pointer dereference
+error while de-referencing bchan.
+
+Reset the value of bchan to NULL if a channel request fails.
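+
+The failure mode, in outline (illustrative):
+
+  if (uc->bchan)          /* ERR_PTR() is non-NULL, so a failed */
+          return 0;       /* request looks already allocated    */
+
+  uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
+  if (IS_ERR(uc->bchan)) {
+          ret = PTR_ERR(uc->bchan);
+          uc->bchan = NULL;   /* keep the not-allocated sentinel */
+          return ret;
+  }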
+ +CC: stable@vger.kernel.org +Acked-by: Peter Ujfalusi +Signed-off-by: Kishon Vijay Abraham I +Link: https://lore.kernel.org/r/20211031032411.27235-2-kishon@ti.com +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman +--- + drivers/dma/ti/k3-udma.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/dma/ti/k3-udma.c ++++ b/drivers/dma/ti/k3-udma.c +@@ -1348,6 +1348,7 @@ static int bcdma_get_bchan(struct udma_c + { + struct udma_dev *ud = uc->ud; + enum udma_tp_level tpl; ++ int ret; + + if (uc->bchan) { + dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", +@@ -1365,8 +1366,11 @@ static int bcdma_get_bchan(struct udma_c + tpl = ud->bchan_tpl.levels - 1; + + uc->bchan = __udma_reserve_bchan(ud, tpl, -1); +- if (IS_ERR(uc->bchan)) +- return PTR_ERR(uc->bchan); ++ if (IS_ERR(uc->bchan)) { ++ ret = PTR_ERR(uc->bchan); ++ uc->bchan = NULL; ++ return ret; ++ } + + uc->tchan = uc->bchan; + diff --git a/queue-5.15/dmaengine-ti-k3-udma-set-r-tchan-or-rflow-to-null-if-request-fail.patch b/queue-5.15/dmaengine-ti-k3-udma-set-r-tchan-or-rflow-to-null-if-request-fail.patch new file mode 100644 index 00000000000..2838fd76a7a --- /dev/null +++ b/queue-5.15/dmaengine-ti-k3-udma-set-r-tchan-or-rflow-to-null-if-request-fail.patch @@ -0,0 +1,97 @@ +From eb91224e47ec33a0a32c9be0ec0fcb3433e555fd Mon Sep 17 00:00:00 2001 +From: Kishon Vijay Abraham I +Date: Sun, 31 Oct 2021 08:54:11 +0530 +Subject: dmaengine: ti: k3-udma: Set r/tchan or rflow to NULL if request fail + +From: Kishon Vijay Abraham I + +commit eb91224e47ec33a0a32c9be0ec0fcb3433e555fd upstream. + +udma_get_*() checks if rchan/tchan/rflow is already allocated by checking +if it has a NON NULL value. For the error cases, rchan/tchan/rflow will +have error value and udma_get_*() considers this as already allocated +(PASS) since the error values are NON NULL. This results in NULL pointer +dereference error while de-referencing rchan/tchan/rflow. + +Reset the value of rchan/tchan/rflow to NULL if a channel request fails. 
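+
+Note that PTR_ERR_OR_ZERO() alone is not enough here: it propagates
+the error code but leaves the ERR_PTR() value behind, e.g.
+(illustrative):
+
+  uc->rflow = __udma_get_rflow(ud, flow_id);
+  return PTR_ERR_OR_ZERO(uc->rflow);  /* uc->rflow still holds ERR_PTR */
+
+which is why each error path below also writes NULL back.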
+ +CC: stable@vger.kernel.org +Acked-by: Peter Ujfalusi +Signed-off-by: Kishon Vijay Abraham I +Link: https://lore.kernel.org/r/20211031032411.27235-3-kishon@ti.com +Signed-off-by: Vinod Koul +Signed-off-by: Greg Kroah-Hartman +--- + drivers/dma/ti/k3-udma.c | 24 ++++++++++++++++++++---- + 1 file changed, 20 insertions(+), 4 deletions(-) + +--- a/drivers/dma/ti/k3-udma.c ++++ b/drivers/dma/ti/k3-udma.c +@@ -1380,6 +1380,7 @@ static int bcdma_get_bchan(struct udma_c + static int udma_get_tchan(struct udma_chan *uc) + { + struct udma_dev *ud = uc->ud; ++ int ret; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", +@@ -1394,8 +1395,11 @@ static int udma_get_tchan(struct udma_ch + */ + uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, + uc->config.mapped_channel_id); +- if (IS_ERR(uc->tchan)) +- return PTR_ERR(uc->tchan); ++ if (IS_ERR(uc->tchan)) { ++ ret = PTR_ERR(uc->tchan); ++ uc->tchan = NULL; ++ return ret; ++ } + + if (ud->tflow_cnt) { + int tflow_id; +@@ -1425,6 +1429,7 @@ static int udma_get_tchan(struct udma_ch + static int udma_get_rchan(struct udma_chan *uc) + { + struct udma_dev *ud = uc->ud; ++ int ret; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", +@@ -1439,8 +1444,13 @@ static int udma_get_rchan(struct udma_ch + */ + uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, + uc->config.mapped_channel_id); ++ if (IS_ERR(uc->rchan)) { ++ ret = PTR_ERR(uc->rchan); ++ uc->rchan = NULL; ++ return ret; ++ } + +- return PTR_ERR_OR_ZERO(uc->rchan); ++ return 0; + } + + static int udma_get_chan_pair(struct udma_chan *uc) +@@ -1494,6 +1504,7 @@ static int udma_get_chan_pair(struct udm + static int udma_get_rflow(struct udma_chan *uc, int flow_id) + { + struct udma_dev *ud = uc->ud; ++ int ret; + + if (!uc->rchan) { + dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); +@@ -1507,8 +1518,13 @@ static int udma_get_rflow(struct udma_ch + } + + uc->rflow = __udma_get_rflow(ud, flow_id); ++ if (IS_ERR(uc->rflow)) { ++ ret = PTR_ERR(uc->rflow); ++ uc->rflow = NULL; ++ return ret; ++ } + +- return PTR_ERR_OR_ZERO(uc->rflow); ++ return 0; + } + + static void bcdma_put_bchan(struct udma_chan *uc) diff --git a/queue-5.15/drm-i915-guc-fix-blocked-context-accounting.patch b/queue-5.15/drm-i915-guc-fix-blocked-context-accounting.patch new file mode 100644 index 00000000000..d0c9b7f9fac --- /dev/null +++ b/queue-5.15/drm-i915-guc-fix-blocked-context-accounting.patch @@ -0,0 +1,36 @@ +From fc30a6764a54dea42291aeb7009bef7aa2fc1cd4 Mon Sep 17 00:00:00 2001 +From: Matthew Brost +Date: Thu, 9 Sep 2021 09:47:22 -0700 +Subject: drm/i915/guc: Fix blocked context accounting + +From: Matthew Brost + +commit fc30a6764a54dea42291aeb7009bef7aa2fc1cd4 upstream. + +Prior to this patch the blocked context counter was cleared on +init_sched_state (used during registering a context & resets) which is +incorrect. This state needs to be persistent or the counter can read the +incorrect value resulting in scheduling never getting enabled again. 
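+
+In outline (assuming SCHED_STATE_BLOCKED_MASK covers the persistent
+blocked-context bits; illustrative only):
+
+  /* before: the blocked counter was wiped on every re-registration */
+  ce->guc_state.sched_state = 0;
+  /* after: everything except the blocked counter is cleared */
+  ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;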
+ +Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") +Signed-off-by: Matthew Brost +Reviewed-by: Daniel Vetter +Cc: +Signed-off-by: John Harrison +Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-2-matthew.brost@intel.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c ++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +@@ -152,7 +152,7 @@ static inline void init_sched_state(stru + { + /* Only should be called from guc_lrc_desc_pin() */ + atomic_set(&ce->guc_sched_state_no_lock, 0); +- ce->guc_state.sched_state = 0; ++ ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; + } + + static inline bool diff --git a/queue-5.15/erofs-fix-unsafe-pagevec-reuse-of-hooked-pclusters.patch b/queue-5.15/erofs-fix-unsafe-pagevec-reuse-of-hooked-pclusters.patch new file mode 100644 index 00000000000..6d5705fe38a --- /dev/null +++ b/queue-5.15/erofs-fix-unsafe-pagevec-reuse-of-hooked-pclusters.patch @@ -0,0 +1,121 @@ +From 86432a6dca9bed79111990851df5756d3eb5f57c Mon Sep 17 00:00:00 2001 +From: Gao Xiang +Date: Thu, 4 Nov 2021 02:20:06 +0800 +Subject: erofs: fix unsafe pagevec reuse of hooked pclusters + +From: Gao Xiang + +commit 86432a6dca9bed79111990851df5756d3eb5f57c upstream. + +There are pclusters in runtime marked with Z_EROFS_PCLUSTER_TAIL +before actual I/O submission. Thus, the decompression chain can be +extended if the following pcluster chain hooks such tail pcluster. + +As the related comment mentioned, if some page is made of a hooked +pcluster and another followed pcluster, it can be reused for in-place +I/O (since I/O should be submitted anyway): + _______________________________________________________________ +| tail (partial) page | head (partial) page | +|_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| + +However, it's by no means safe to reuse as pagevec since if such +PRIMARY_HOOKED pclusters finally move into bypass chain without I/O +submission. It's somewhat hard to reproduce with LZ4 and I just found +it (general protection fault) by ro_fsstressing a LZMA image for long +time. + +I'm going to actively clean up related code together with multi-page +folio adaption in the next few months. Let's address it directly for +easier backporting for now. 
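+
+The guard added below boils down to (simplified):
+
+  /* an EXCLUSIVE page may never reach I/O if its pcluster ends up
+     in the bypass chain, so don't reuse it as pagevec storage */
+  if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
+          type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;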
+ +Call trace for reference: + z_erofs_decompress_pcluster+0x10a/0x8a0 [erofs] + z_erofs_decompress_queue.isra.36+0x3c/0x60 [erofs] + z_erofs_runqueue+0x5f3/0x840 [erofs] + z_erofs_readahead+0x1e8/0x320 [erofs] + read_pages+0x91/0x270 + page_cache_ra_unbounded+0x18b/0x240 + filemap_get_pages+0x10a/0x5f0 + filemap_read+0xa9/0x330 + new_sync_read+0x11b/0x1a0 + vfs_read+0xf1/0x190 + +Link: https://lore.kernel.org/r/20211103182006.4040-1-xiang@kernel.org +Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support") +Cc: # 4.19+ +Reviewed-by: Chao Yu +Signed-off-by: Gao Xiang +Signed-off-by: Greg Kroah-Hartman +--- + fs/erofs/zdata.c | 13 +++++++------ + fs/erofs/zpvec.h | 13 ++++++++++--- + 2 files changed, 17 insertions(+), 9 deletions(-) + +--- a/fs/erofs/zdata.c ++++ b/fs/erofs/zdata.c +@@ -373,8 +373,8 @@ static bool z_erofs_try_inplace_io(struc + + /* callers must be with collection lock held */ + static int z_erofs_attach_page(struct z_erofs_collector *clt, +- struct page *page, +- enum z_erofs_page_type type) ++ struct page *page, enum z_erofs_page_type type, ++ bool pvec_safereuse) + { + int ret; + +@@ -384,9 +384,9 @@ static int z_erofs_attach_page(struct z_ + z_erofs_try_inplace_io(clt, page)) + return 0; + +- ret = z_erofs_pagevec_enqueue(&clt->vector, page, type); ++ ret = z_erofs_pagevec_enqueue(&clt->vector, page, type, ++ pvec_safereuse); + clt->cl->vcnt += (unsigned int)ret; +- + return ret ? 0 : -EAGAIN; + } + +@@ -729,7 +729,8 @@ hitted: + tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); + + retry: +- err = z_erofs_attach_page(clt, page, page_type); ++ err = z_erofs_attach_page(clt, page, page_type, ++ clt->mode >= COLLECT_PRIMARY_FOLLOWED); + /* should allocate an additional short-lived page for pagevec */ + if (err == -EAGAIN) { + struct page *const newpage = +@@ -737,7 +738,7 @@ retry: + + set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE); + err = z_erofs_attach_page(clt, newpage, +- Z_EROFS_PAGE_TYPE_EXCLUSIVE); ++ Z_EROFS_PAGE_TYPE_EXCLUSIVE, true); + if (!err) + goto retry; + } +--- a/fs/erofs/zpvec.h ++++ b/fs/erofs/zpvec.h +@@ -106,11 +106,18 @@ static inline void z_erofs_pagevec_ctor_ + + static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, + struct page *page, +- enum z_erofs_page_type type) ++ enum z_erofs_page_type type, ++ bool pvec_safereuse) + { +- if (!ctor->next && type) +- if (ctor->index + 1 == ctor->nr) ++ if (!ctor->next) { ++ /* some pages cannot be reused as pvec safely without I/O */ ++ if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse) ++ type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED; ++ ++ if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE && ++ ctor->index + 1 == ctor->nr) + return false; ++ } + + if (ctor->index >= ctor->nr) + z_erofs_pagevec_ctor_pagedown(ctor, false); diff --git a/queue-5.15/f2fs-fix-uaf-in-f2fs_available_free_memory.patch b/queue-5.15/f2fs-fix-uaf-in-f2fs_available_free_memory.patch new file mode 100644 index 00000000000..9c1a5ec6b66 --- /dev/null +++ b/queue-5.15/f2fs-fix-uaf-in-f2fs_available_free_memory.patch @@ -0,0 +1,52 @@ +From 5429c9dbc9025f9a166f64e22e3a69c94fd5b29b Mon Sep 17 00:00:00 2001 +From: Dongliang Mu +Date: Thu, 4 Nov 2021 16:22:01 +0800 +Subject: f2fs: fix UAF in f2fs_available_free_memory + +From: Dongliang Mu + +commit 5429c9dbc9025f9a166f64e22e3a69c94fd5b29b upstream. + +if2fs_fill_super +-> f2fs_build_segment_manager + -> create_discard_cmd_control + -> f2fs_start_discard_thread + +It invokes kthread_run to create a thread and run issue_discard_thread. 
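+
+Roughly (simplified; dev here abbreviates the f2fs block device), the
+thread is started as:
+
+  dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
+          "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
+
+so the discard thread runs concurrently with the error paths below,
+dereferencing sbi.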
+ +However, if f2fs_build_node_manager fails, the control flow goes to +free_nm and calls f2fs_destroy_node_manager. This function will free +sbi->nm_info. However, if issue_discard_thread accesses sbi->nm_info +after the deallocation, but before the f2fs_stop_discard_thread, it will +cause UAF(Use-after-free). + +-> f2fs_destroy_segment_manager + -> destroy_discard_cmd_control + -> f2fs_stop_discard_thread + +Fix this by stopping discard thread before f2fs_destroy_node_manager. + +Note that, the commit d6d2b491a82e1 introduces the call of +f2fs_available_free_memory into issue_discard_thread. + +Cc: stable@vger.kernel.org +Fixes: d6d2b491a82e ("f2fs: allow to change discard policy based on cached discard cmds") +Signed-off-by: Dongliang Mu +Reviewed-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Greg Kroah-Hartman +--- + fs/f2fs/super.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -4352,6 +4352,8 @@ free_node_inode: + free_stats: + f2fs_destroy_stats(sbi); + free_nm: ++ /* stop discard thread before destroying node manager */ ++ f2fs_stop_discard_thread(sbi); + f2fs_destroy_node_manager(sbi); + free_sm: + f2fs_destroy_segment_manager(sbi); diff --git a/queue-5.15/f2fs-include-non-compressed-blocks-in-compr_written_block.patch b/queue-5.15/f2fs-include-non-compressed-blocks-in-compr_written_block.patch new file mode 100644 index 00000000000..f67ff2bcfab --- /dev/null +++ b/queue-5.15/f2fs-include-non-compressed-blocks-in-compr_written_block.patch @@ -0,0 +1,31 @@ +From 09631cf3234d32156e7cae32275f5a4144c683c5 Mon Sep 17 00:00:00 2001 +From: Daeho Jeong +Date: Wed, 6 Oct 2021 10:49:10 -0700 +Subject: f2fs: include non-compressed blocks in compr_written_block + +From: Daeho Jeong + +commit 09631cf3234d32156e7cae32275f5a4144c683c5 upstream. + +Need to include non-compressed blocks in compr_written_block to +estimate average compression ratio more accurately. + +Fixes: 5ac443e26a09 ("f2fs: add sysfs nodes to get runtime compression stat") +Cc: stable@vger.kernel.org +Signed-off-by: Daeho Jeong +Signed-off-by: Jaegeuk Kim +Signed-off-by: Greg Kroah-Hartman +--- + fs/f2fs/compress.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -1530,6 +1530,7 @@ int f2fs_write_multi_pages(struct compre + if (cluster_may_compress(cc)) { + err = f2fs_compress_pages(cc); + if (err == -EAGAIN) { ++ add_compr_block_stat(cc->inode, cc->cluster_size); + goto write; + } else if (err) { + f2fs_put_rpages_wbc(cc, wbc, true, 1); diff --git a/queue-5.15/f2fs-should-use-gfp_nofs-for-directory-inodes.patch b/queue-5.15/f2fs-should-use-gfp_nofs-for-directory-inodes.patch new file mode 100644 index 00000000000..5c2b63d4875 --- /dev/null +++ b/queue-5.15/f2fs-should-use-gfp_nofs-for-directory-inodes.patch @@ -0,0 +1,104 @@ +From 92d602bc7177325e7453189a22e0c8764ed3453e Mon Sep 17 00:00:00 2001 +From: Jaegeuk Kim +Date: Tue, 7 Sep 2021 10:24:21 -0700 +Subject: f2fs: should use GFP_NOFS for directory inodes + +From: Jaegeuk Kim + +commit 92d602bc7177325e7453189a22e0c8764ed3453e upstream. + +We use inline_dentry which requires to allocate dentry page when adding a link. +If we allow to reclaim memory from filesystem, we do down_read(&sbi->cp_rwsem) +twice by f2fs_lock_op(). I think this should be okay, but how about stopping +the lockdep complaint [1]? 
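+
+The change below does that by forcing GFP_NOFS for directory mappings,
+roughly (illustrative):
+
+  /* was: inode_nohighmem(inode); */
+  mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+
+so dentry-page allocation can no longer recurse into filesystem
+reclaim. The recursion in question: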
+ +f2fs_create() + - f2fs_lock_op() + - f2fs_do_add_link() + - __f2fs_find_entry + - f2fs_get_read_data_page() + -> kswapd + - shrink_node + - f2fs_evict_inode + - f2fs_lock_op() + +[1] + +fs_reclaim +){+.+.}-{0:0} +: +kswapd0: lock_acquire+0x114/0x394 +kswapd0: __fs_reclaim_acquire+0x40/0x50 +kswapd0: prepare_alloc_pages+0x94/0x1ec +kswapd0: __alloc_pages_nodemask+0x78/0x1b0 +kswapd0: pagecache_get_page+0x2e0/0x57c +kswapd0: f2fs_get_read_data_page+0xc0/0x394 +kswapd0: f2fs_find_data_page+0xa4/0x23c +kswapd0: find_in_level+0x1a8/0x36c +kswapd0: __f2fs_find_entry+0x70/0x100 +kswapd0: f2fs_do_add_link+0x84/0x1ec +kswapd0: f2fs_mkdir+0xe4/0x1e4 +kswapd0: vfs_mkdir+0x110/0x1c0 +kswapd0: do_mkdirat+0xa4/0x160 +kswapd0: __arm64_sys_mkdirat+0x24/0x34 +kswapd0: el0_svc_common.llvm.17258447499513131576+0xc4/0x1e8 +kswapd0: do_el0_svc+0x28/0xa0 +kswapd0: el0_svc+0x24/0x38 +kswapd0: el0_sync_handler+0x88/0xec +kswapd0: el0_sync+0x1c0/0x200 +kswapd0: +-> #1 +( +&sbi->cp_rwsem +){++++}-{3:3} +: +kswapd0: lock_acquire+0x114/0x394 +kswapd0: down_read+0x7c/0x98 +kswapd0: f2fs_do_truncate_blocks+0x78/0x3dc +kswapd0: f2fs_truncate+0xc8/0x128 +kswapd0: f2fs_evict_inode+0x2b8/0x8b8 +kswapd0: evict+0xd4/0x2f8 +kswapd0: iput+0x1c0/0x258 +kswapd0: do_unlinkat+0x170/0x2a0 +kswapd0: __arm64_sys_unlinkat+0x4c/0x68 +kswapd0: el0_svc_common.llvm.17258447499513131576+0xc4/0x1e8 +kswapd0: do_el0_svc+0x28/0xa0 +kswapd0: el0_svc+0x24/0x38 +kswapd0: el0_sync_handler+0x88/0xec +kswapd0: el0_sync+0x1c0/0x200 + +Cc: stable@vger.kernel.org +Fixes: bdbc90fa55af ("f2fs: don't put dentry page in pagecache into highmem") +Reviewed-by: Chao Yu +Reviewed-by: Stanley Chu +Reviewed-by: Light Hsieh +Tested-by: Light Hsieh +Signed-off-by: Jaegeuk Kim +Signed-off-by: Greg Kroah-Hartman +--- + fs/f2fs/inode.c | 2 +- + fs/f2fs/namei.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -527,7 +527,7 @@ make_now: + inode->i_op = &f2fs_dir_inode_operations; + inode->i_fop = &f2fs_dir_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; +- inode_nohighmem(inode); ++ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + } else if (S_ISLNK(inode->i_mode)) { + if (file_is_encrypt(inode)) + inode->i_op = &f2fs_encrypted_symlink_inode_operations; +--- a/fs/f2fs/namei.c ++++ b/fs/f2fs/namei.c +@@ -757,7 +757,7 @@ static int f2fs_mkdir(struct user_namesp + inode->i_op = &f2fs_dir_inode_operations; + inode->i_fop = &f2fs_dir_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; +- inode_nohighmem(inode); ++ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + + set_inode_flag(inode, FI_INC_LINK); + f2fs_lock_op(sbi); diff --git a/queue-5.15/irqchip-sifive-plic-fixup-eoi-failed-when-masked.patch b/queue-5.15/irqchip-sifive-plic-fixup-eoi-failed-when-masked.patch new file mode 100644 index 00000000000..a239c12ccb3 --- /dev/null +++ b/queue-5.15/irqchip-sifive-plic-fixup-eoi-failed-when-masked.patch @@ -0,0 +1,65 @@ +From 69ea463021be0d159ab30f96195fb0dd18ee2272 Mon Sep 17 00:00:00 2001 +From: Guo Ren +Date: Fri, 5 Nov 2021 17:47:48 +0800 +Subject: irqchip/sifive-plic: Fixup EOI failed when masked + +From: Guo Ren + +commit 69ea463021be0d159ab30f96195fb0dd18ee2272 upstream. + +When using "devm_request_threaded_irq(,,,,IRQF_ONESHOT,,)" in a driver, +only the first interrupt is handled, and following interrupts are never +delivered (initially reported in [1]). 
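+
+For example, a driver doing (driver names here are illustrative):
+
+  ret = devm_request_threaded_irq(dev, irq, NULL, my_thread_fn,
+                                  IRQF_ONESHOT, "mydev", priv);
+
+handles the first interrupt and then never sees my_thread_fn run again.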
+
+That's because the RISC-V PLIC cannot EOI masked interrupts, as explained
+in the description of Interrupt Completion in the PLIC spec [2]:
+
+<quote>
+The PLIC signals it has completed executing an interrupt handler by
+writing the interrupt ID it received from the claim to the claim/complete
+register. The PLIC does not check whether the completion ID is the same
+as the last claim ID for that target. If the completion ID does not match
+an interrupt source that *is currently enabled* for the target, the
+completion is silently ignored.
+</quote>
+
+Re-enable the interrupt before completion if it has been masked during
+the handling, and remask it afterwards.
+
+[1] http://lists.infradead.org/pipermail/linux-riscv/2021-July/007441.html
+[2] https://github.com/riscv/riscv-plic-spec/blob/8bc15a35d07c9edf7b5d23fec9728302595ffc4d/riscv-plic.adoc
+
+Fixes: bb0fed1c60cc ("irqchip/sifive-plic: Switch to fasteoi flow")
+Reported-by: Vincent Pelletier
+Tested-by: Nikita Shubin
+Signed-off-by: Guo Ren
+Cc: stable@vger.kernel.org
+Cc: Thomas Gleixner
+Cc: Palmer Dabbelt
+Cc: Atish Patra
+Reviewed-by: Anup Patel
+[maz: amended commit message]
+Signed-off-by: Marc Zyngier
+Link: https://lore.kernel.org/r/20211105094748.3894453-1-guoren@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/irqchip/irq-sifive-plic.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -163,7 +163,13 @@ static void plic_irq_eoi(struct irq_data
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ if (irqd_irq_masked(d)) {
++ plic_irq_unmask(d);
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ plic_irq_mask(d);
++ } else {
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ }
+ }
+
+ static struct irq_chip plic_chip = {
diff --git a/queue-5.15/ksmbd-don-t-need-8byte-alignment-for-request-length-in-ksmbd_check_message.patch b/queue-5.15/ksmbd-don-t-need-8byte-alignment-for-request-length-in-ksmbd_check_message.patch
new file mode 100644
index 00000000000..6347e3de5e0
--- /dev/null
+++ b/queue-5.15/ksmbd-don-t-need-8byte-alignment-for-request-length-in-ksmbd_check_message.patch
@@ -0,0 +1,40 @@
+From b53ad8107ee873795ecb5039d46b5d5502d404f2 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon
+Date: Fri, 29 Oct 2021 08:09:50 +0900
+Subject: ksmbd: don't need 8byte alignment for request length in ksmbd_check_message
+
+From: Namjae Jeon
+
+commit b53ad8107ee873795ecb5039d46b5d5502d404f2 upstream.
+
+When validating request length in ksmbd_check_message, 8byte alignment
+is not needed for compound request. It can cause wrong validation
+of request length.
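+
+For example (numbers illustrative): if the final operation of a
+compound request occupies 68 bytes, round_up(68, 8) = 72, so the
+length being validated no longer matches what the client actually
+sent and an otherwise valid request can be rejected.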
+ +Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3") +Cc: stable@vger.kernel.org # v5.15 +Acked-by: Hyunchul Lee +Signed-off-by: Namjae Jeon +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/ksmbd/smb2misc.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/fs/ksmbd/smb2misc.c ++++ b/fs/ksmbd/smb2misc.c +@@ -358,12 +358,10 @@ int ksmbd_smb2_check_message(struct ksmb + hdr = &pdu->hdr; + } + +- if (le32_to_cpu(hdr->NextCommand) > 0) { ++ if (le32_to_cpu(hdr->NextCommand) > 0) + len = le32_to_cpu(hdr->NextCommand); +- } else if (work->next_smb2_rcv_hdr_off) { ++ else if (work->next_smb2_rcv_hdr_off) + len -= work->next_smb2_rcv_hdr_off; +- len = round_up(len, 8); +- } + + if (check_smb2_hdr(hdr)) + return 1; diff --git a/queue-5.15/ksmbd-fix-buffer-length-check-in-fsctl_validate_negotiate_info.patch b/queue-5.15/ksmbd-fix-buffer-length-check-in-fsctl_validate_negotiate_info.patch new file mode 100644 index 00000000000..f8581780a35 --- /dev/null +++ b/queue-5.15/ksmbd-fix-buffer-length-check-in-fsctl_validate_negotiate_info.patch @@ -0,0 +1,37 @@ +From 78f1688a64cca77758ceb9b183088cf0054bfc82 Mon Sep 17 00:00:00 2001 +From: Marios Makassikis +Date: Thu, 28 Oct 2021 21:01:27 +0200 +Subject: ksmbd: Fix buffer length check in fsctl_validate_negotiate_info() + +From: Marios Makassikis + +commit 78f1688a64cca77758ceb9b183088cf0054bfc82 upstream. + +The validate_negotiate_info_req struct definition includes an extra +field to access the data coming after the header. This causes the check +in fsctl_validate_negotiate_info() to count the first element of the +array twice. This in turn makes some valid requests fail, depending on +whether they include padding or not. + +Fixes: f7db8fd03a4b ("ksmbd: add validation in smb2_ioctl") +Cc: stable@vger.kernel.org # v5.15 +Acked-by: Namjae Jeon +Acked-by: Hyunchul Lee +Signed-off-by: Marios Makassikis +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/ksmbd/smb2pdu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/fs/ksmbd/smb2pdu.c ++++ b/fs/ksmbd/smb2pdu.c +@@ -7319,7 +7319,7 @@ static int fsctl_validate_negotiate_info + int ret = 0; + int dialect; + +- if (in_buf_len < sizeof(struct validate_negotiate_info_req) + ++ if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) + + le16_to_cpu(neg_req->DialectCount) * sizeof(__le16)) + return -EINVAL; + diff --git a/queue-5.15/posix-cpu-timers-clear-task-posix_cputimers_work-in-copy_process.patch b/queue-5.15/posix-cpu-timers-clear-task-posix_cputimers_work-in-copy_process.patch new file mode 100644 index 00000000000..23a76f4708e --- /dev/null +++ b/queue-5.15/posix-cpu-timers-clear-task-posix_cputimers_work-in-copy_process.patch @@ -0,0 +1,111 @@ +From ca7752caeaa70bd31d1714af566c9809688544af Mon Sep 17 00:00:00 2001 +From: Michael Pratt +Date: Mon, 1 Nov 2021 17:06:15 -0400 +Subject: posix-cpu-timers: Clear task::posix_cputimers_work in copy_process() + +From: Michael Pratt + +commit ca7752caeaa70bd31d1714af566c9809688544af upstream. + +copy_process currently copies task_struct.posix_cputimers_work as-is. If a +timer interrupt arrives while handling clone and before dup_task_struct +completes then the child task will have: + +1. posix_cputimers_work.scheduled = true +2. posix_cputimers_work.work queued. 
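+
+In other words, the child is left with (condensed, illustrative):
+
+  /* inherited via dup_task_struct() during a timer-interrupted clone */
+  p->posix_cputimers_work.scheduled = true;
+  /* ...but the queued work item sits on the parent's task_works list */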
+ +copy_process clears task_struct.task_works, so (2) will have no effect and +posix_cpu_timers_work will never run (not to mention it doesn't make sense +for two tasks to share a common linked list). + +Since posix_cpu_timers_work never runs, posix_cputimers_work.scheduled is +never cleared. Since scheduled is set, future timer interrupts will skip +scheduling work, with the ultimate result that the task will never receive +timer expirations. + +Together, the complete flow is: + +1. Task 1 calls clone(), enters kernel. +2. Timer interrupt fires, schedules task work on Task 1. + 2a. task_struct.posix_cputimers_work.scheduled = true + 2b. task_struct.posix_cputimers_work.work added to + task_struct.task_works. +3. dup_task_struct() copies Task 1 to Task 2. +4. copy_process() clears task_struct.task_works for Task 2. +5. Future timer interrupts on Task 2 see + task_struct.posix_cputimers_work.scheduled = true and skip scheduling + work. + +Fix this by explicitly clearing contents of task_struct.posix_cputimers_work +in copy_process(). This was never meant to be shared or inherited across +tasks in the first place. + +Fixes: 1fb497dd0030 ("posix-cpu-timers: Provide mechanisms to defer timer handling to task_work") +Reported-by: Rhys Hiltner +Signed-off-by: Michael Pratt +Signed-off-by: Thomas Gleixner +Cc: +Link: https://lore.kernel.org/r/20211101210615.716522-1-mpratt@google.com +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/posix-timers.h | 2 ++ + kernel/fork.c | 1 + + kernel/time/posix-cpu-timers.c | 19 +++++++++++++++++-- + 3 files changed, 20 insertions(+), 2 deletions(-) + +--- a/include/linux/posix-timers.h ++++ b/include/linux/posix-timers.h +@@ -184,8 +184,10 @@ static inline void posix_cputimers_group + #endif + + #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK ++void clear_posix_cputimers_work(struct task_struct *p); + void posix_cputimers_init_work(void); + #else ++static inline void clear_posix_cputimers_work(struct task_struct *p) { } + static inline void posix_cputimers_init_work(void) { } + #endif + +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -2280,6 +2280,7 @@ static __latent_entropy struct task_stru + p->pdeath_signal = 0; + INIT_LIST_HEAD(&p->thread_group); + p->task_works = NULL; ++ clear_posix_cputimers_work(p); + + #ifdef CONFIG_KRETPROBES + p->kretprobe_instances.first = NULL; +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -1159,13 +1159,28 @@ static void posix_cpu_timers_work(struct + } + + /* ++ * Clear existing posix CPU timers task work. ++ */ ++void clear_posix_cputimers_work(struct task_struct *p) ++{ ++ /* ++ * A copied work entry from the old task is not meaningful, clear it. ++ * N.B. init_task_work will not do this. ++ */ ++ memset(&p->posix_cputimers_work.work, 0, ++ sizeof(p->posix_cputimers_work.work)); ++ init_task_work(&p->posix_cputimers_work.work, ++ posix_cpu_timers_work); ++ p->posix_cputimers_work.scheduled = false; ++} ++ ++/* + * Initialize posix CPU timers task work in init task. Out of line to + * keep the callback static and to avoid header recursion hell. 
+ */
+ void __init posix_cputimers_init_work(void)
+ {
+- init_task_work(&current->posix_cputimers_work.work,
+- posix_cpu_timers_work);
++ clear_posix_cputimers_work(current);
+ }
+
+ /*
diff --git a/queue-5.15/series b/queue-5.15/series
index 1825bbdb3e4..c24b05e21f9 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -857,3 +857,19 @@ x86-mce-add-errata-workaround-for-skylake-skx37.patch
 pci-msi-move-non-mask-check-back-into-low-level-accessors.patch
 pci-msi-destroy-sysfs-before-freeing-entries.patch
 kvm-x86-move-guest_pv_has-out-of-user_access-section.patch
+posix-cpu-timers-clear-task-posix_cputimers_work-in-copy_process.patch
+irqchip-sifive-plic-fixup-eoi-failed-when-masked.patch
+f2fs-should-use-gfp_nofs-for-directory-inodes.patch
+f2fs-include-non-compressed-blocks-in-compr_written_block.patch
+f2fs-fix-uaf-in-f2fs_available_free_memory.patch
+ceph-fix-mdsmap-decode-when-there-are-mds-s-beyond-max_mds.patch
+erofs-fix-unsafe-pagevec-reuse-of-hooked-pclusters.patch
+drm-i915-guc-fix-blocked-context-accounting.patch
+block-hold-invalidate_lock-in-blkdiscard-ioctl.patch
+block-hold-invalidate_lock-in-blkzeroout-ioctl.patch
+block-hold-invalidate_lock-in-blkresetzone-ioctl.patch
+ksmbd-fix-buffer-length-check-in-fsctl_validate_negotiate_info.patch
+ksmbd-don-t-need-8byte-alignment-for-request-length-in-ksmbd_check_message.patch
+dmaengine-ti-k3-udma-set-bchan-to-null-if-a-channel-request-fail.patch
+dmaengine-ti-k3-udma-set-r-tchan-or-rflow-to-null-if-request-fail.patch
+dmaengine-bestcomm-fix-system-boot-lockups.patch