From 11167d4ddaa18a808ec40a9728d4fb5d66e316b7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 22 Jan 2024 11:43:04 -0800 Subject: [PATCH] 6.6-stable patches added patches: block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch block-remove-special-casing-of-compound-pages.patch bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch drm-amd-enable-pcie-pme-from-d3.patch fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch fbdev-flush-deferred-io-before-closing.patch fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch io_uring-don-t-check-iopoll-if-request-completes.patch io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch md-bypass-block-throttle-for-superblock-update.patch md-raid1-use-blk_opf_t-for-read-and-write-operations.patch netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch rootfs-fix-support-for-rootfstype-when-root-is-given.patch scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch scsi-target-core-add-missing-file_-start-end-_write.patch scsi-ufs-core-simplify-power-management-during-async-scan.patch --- ...-needs-to-be-aligned-with-block-size.patch | 62 ++++++++ ...mpty-bio-with-bio_for_each_folio_all.patch | 52 +++++++ ...ove-special-casing-of-compound-pages.patch | 134 ++++++++++++++++++ ...y-violation-in-min-max-_key_size_set.patch | 92 ++++++++++++ ...nt-branch-in-bpf_tracing_prog_attach.patch | 79 +++++++++++ .../drm-amd-enable-pcie-pme-from-d3.patch | 36 +++++ ...fix-name-of-fb_ops-initializer-macro.patch | 38 +++++ ...dev-flush-deferred-io-before-closing.patch | 46 ++++++ ...eferred-work-in-fb_deferred_io_fsync.patch | 51 +++++++ ...-t-check-iopoll-if-request-completes.patch | 39 +++++ ...cal-task_work-is-run-on-wait-timeout.patch | 55 +++++++ ...-io-bytes_done-is-always-initialized.patch | 50 +++++++ ...block-throttle-for-superblock-update.patch | 43 ++++++ ..._opf_t-for-read-and-write-operations.patch | 84 +++++++++++ ...element-is-active-in-next-generation.patch | 35 +++++ ...bounds-access-in-of_pwm_single_xlate.patch | 35 +++++ ...-don-t-use-dev_err_probe-in-.request.patch | 47 ++++++ ...rt-for-rootfstype-when-root-is-given.patch | 70 +++++++++ ...roller-reset-and-unrecoverable-state.patch | 49 +++++++ ...-block-devices-post-controller-reset.patch | 53 +++++++ ...v-queue-depth-after-controller-reset.patch | 42 ++++++ ...e-add-missing-file_-start-end-_write.patch | 60 ++++++++ ...y-power-management-during-async-scan.patch | 54 +++++++ queue-6.6/series | 23 +++ 24 files changed, 1329 insertions(+) create mode 100644 queue-6.6/block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch create mode 100644 queue-6.6/block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch create mode 100644 queue-6.6/block-remove-special-casing-of-compound-pages.patch create mode 100644 queue-6.6/bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch create mode 100644 queue-6.6/bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch create mode 100644 
queue-6.6/drm-amd-enable-pcie-pme-from-d3.patch create mode 100644 queue-6.6/fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch create mode 100644 queue-6.6/fbdev-flush-deferred-io-before-closing.patch create mode 100644 queue-6.6/fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch create mode 100644 queue-6.6/io_uring-don-t-check-iopoll-if-request-completes.patch create mode 100644 queue-6.6/io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch create mode 100644 queue-6.6/io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch create mode 100644 queue-6.6/md-bypass-block-throttle-for-superblock-update.patch create mode 100644 queue-6.6/md-raid1-use-blk_opf_t-for-read-and-write-operations.patch create mode 100644 queue-6.6/netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch create mode 100644 queue-6.6/pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch create mode 100644 queue-6.6/pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch create mode 100644 queue-6.6/rootfs-fix-support-for-rootfstype-when-root-is-given.patch create mode 100644 queue-6.6/scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch create mode 100644 queue-6.6/scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch create mode 100644 queue-6.6/scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch create mode 100644 queue-6.6/scsi-target-core-add-missing-file_-start-end-_write.patch create mode 100644 queue-6.6/scsi-ufs-core-simplify-power-management-during-async-scan.patch diff --git a/queue-6.6/block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch b/queue-6.6/block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch new file mode 100644 index 00000000000..a2d8c1a2a5a --- /dev/null +++ b/queue-6.6/block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch @@ -0,0 +1,62 @@ +From 6f64f866aa1ae6975c95d805ed51d7e9433a0016 Mon Sep 17 00:00:00 2001 +From: Min Li +Date: Thu, 29 Jun 2023 14:25:17 +0000 +Subject: block: add check that partition length needs to be aligned with block size + +From: Min Li + +commit 6f64f866aa1ae6975c95d805ed51d7e9433a0016 upstream. + +Before calling add partition or resize partition, there is no check +on whether the length is aligned with the logical block size. +If the logical block size of the disk is larger than 512 bytes, +then the partition size maybe not the multiple of the logical block size, +and when the last sector is read, bio_truncate() will adjust the bio size, +resulting in an IO error if the size of the read command is smaller than +the logical block size.If integrity data is supported, this will also +result in a null pointer dereference when calling bio_integrity_free. 
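The added validation boils down to a short standalone sketch. validate_partition(), the local IS_ALIGNED definition and the sample sizes are illustrative stand-ins for the kernel code, not its API:

  #include <stdint.h>
  #include <stdio.h>

  /* Power-of-two alignment test, mirroring the kernel's IS_ALIGNED(). */
  #define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

  /* Hypothetical helper following the shape of the new blkpg_do_ioctl() checks. */
  static int validate_partition(long long start, long long length,
                                unsigned int logical_block_size)
  {
      if (start < 0 || length <= 0 || start + length < 0)
          return -1;                              /* -EINVAL in the kernel */
      /* OR-ing start and length lets one test catch misalignment in either. */
      if (!IS_ALIGNED((uint64_t)start | (uint64_t)length, logical_block_size))
          return -1;
      return 0;
  }

  int main(void)
  {
      /* 4096-byte logical blocks: 512-byte alignment alone is no longer enough. */
      printf("8192-byte length: %d\n", validate_partition(0, 8192, 4096)); /* 0  */
      printf("4608-byte length: %d\n", validate_partition(0, 4608, 4096)); /* -1 */
      return 0;
  }

The second case is the one the old check missed: it only tested p.start against the logical block size, so a length that was merely 512-byte aligned slipped through and triggered the truncated read described above.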
+ +Cc: +Signed-off-by: Min Li +Reviewed-by: Damien Le Moal +Reviewed-by: Chaitanya Kulkarni +Reviewed-by: Christoph Hellwig +Link: https://lore.kernel.org/r/20230629142517.121241-1-min15.li@samsung.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + block/ioctl.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/block/ioctl.c ++++ b/block/ioctl.c +@@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_d + { + struct gendisk *disk = bdev->bd_disk; + struct blkpg_partition p; +- long long start, length; ++ sector_t start, length; + + if (disk->flags & GENHD_FL_NO_PART) + return -EINVAL; +@@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_d + if (op == BLKPG_DEL_PARTITION) + return bdev_del_partition(disk, p.pno); + ++ if (p.start < 0 || p.length <= 0 || p.start + p.length < 0) ++ return -EINVAL; ++ /* Check that the partition is aligned to the block size */ ++ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev))) ++ return -EINVAL; ++ + start = p.start >> SECTOR_SHIFT; + length = p.length >> SECTOR_SHIFT; + + switch (op) { + case BLKPG_ADD_PARTITION: +- /* check if partition is aligned to blocksize */ +- if (p.start & (bdev_logical_block_size(bdev) - 1)) +- return -EINVAL; + return bdev_add_partition(disk, p.pno, start, length); + case BLKPG_RESIZE_PARTITION: + return bdev_resize_partition(disk, p.pno, start, length); diff --git a/queue-6.6/block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch b/queue-6.6/block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch new file mode 100644 index 00000000000..ac4b993c8a5 --- /dev/null +++ b/queue-6.6/block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch @@ -0,0 +1,52 @@ +From 7bed6f3d08b7af27b7015da8dc3acf2b9c1f21d7 Mon Sep 17 00:00:00 2001 +From: "Matthew Wilcox (Oracle)" +Date: Tue, 16 Jan 2024 21:29:59 +0000 +Subject: block: Fix iterating over an empty bio with bio_for_each_folio_all + +From: Matthew Wilcox (Oracle) + +commit 7bed6f3d08b7af27b7015da8dc3acf2b9c1f21d7 upstream. + +If the bio contains no data, bio_first_folio() calls page_folio() on a +NULL pointer and oopses. Move the test that we've reached the end of +the bio from bio_next_folio() to bio_first_folio(). 
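The shape of the fix shows up in any "first/next" iterator pair: keep the end-of-range test in the "first" helper so an empty input yields zero iterations rather than touching a nonexistent element. A toy sketch with invented names:

  #include <stdio.h>

  struct iter {
      const int *items;
      int count;
      int i;
      const int *cur;
  };

  static void iter_first(struct iter *it, const int *items, int count, int i)
  {
      it->items = items;
      it->count = count;
      it->i = i;
      /* The bounds check lives here, as bio_first_folio() now does. */
      it->cur = (i >= count) ? NULL : &items[i];
  }

  static void iter_next(struct iter *it)
  {
      /* "next" simply re-enters "first", matching the reworked bio_next_folio(). */
      iter_first(it, it->items, it->count, it->i + 1);
  }

  #define for_each_item(it, items, count) \
      for (iter_first(&(it), (items), (count), 0); (it).cur; iter_next(&(it)))

  int main(void)
  {
      int data[] = { 1, 2, 3 };
      struct iter it;
      int visited = 0, empty_visited = 0;

      for_each_item(it, data, 3)
          visited++;
      for_each_item(it, NULL, 0)   /* empty input: body never runs, no NULL deref */
          empty_visited++;
      printf("visited=%d empty_visited=%d\n", visited, empty_visited);
      return 0;
  }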
+ +Reported-by: syzbot+8b23309d5788a79d3eea@syzkaller.appspotmail.com +Reported-by: syzbot+004c1e0fced2b4bc3dcc@syzkaller.appspotmail.com +Fixes: 640d1930bef4 ("block: Add bio_for_each_folio_all()") +Cc: stable@vger.kernel.org +Signed-off-by: Matthew Wilcox (Oracle) +Link: https://lore.kernel.org/r/20240116212959.3413014-1-willy@infradead.org +[axboe: add unlikely() to error case] +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bio.h | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -286,6 +286,11 @@ static inline void bio_first_folio(struc + { + struct bio_vec *bvec = bio_first_bvec_all(bio) + i; + ++ if (unlikely(i >= bio->bi_vcnt)) { ++ fi->folio = NULL; ++ return; ++ } ++ + fi->folio = page_folio(bvec->bv_page); + fi->offset = bvec->bv_offset + + PAGE_SIZE * (bvec->bv_page - &fi->folio->page); +@@ -303,10 +308,8 @@ static inline void bio_next_folio(struct + fi->offset = 0; + fi->length = min(folio_size(fi->folio), fi->_seg_count); + fi->_next = folio_next(fi->folio); +- } else if (fi->_i + 1 < bio->bi_vcnt) { +- bio_first_folio(fi, bio, fi->_i + 1); + } else { +- fi->folio = NULL; ++ bio_first_folio(fi, bio, fi->_i + 1); + } + } + diff --git a/queue-6.6/block-remove-special-casing-of-compound-pages.patch b/queue-6.6/block-remove-special-casing-of-compound-pages.patch new file mode 100644 index 00000000000..7044d382d69 --- /dev/null +++ b/queue-6.6/block-remove-special-casing-of-compound-pages.patch @@ -0,0 +1,134 @@ +From 1b151e2435fc3a9b10c8946c6aebe9f3e1938c55 Mon Sep 17 00:00:00 2001 +From: "Matthew Wilcox (Oracle)" +Date: Mon, 14 Aug 2023 15:41:00 +0100 +Subject: block: Remove special-casing of compound pages + +From: Matthew Wilcox (Oracle) + +commit 1b151e2435fc3a9b10c8946c6aebe9f3e1938c55 upstream. + +The special casing was originally added in pre-git history; reproducing +the commit log here: + +> commit a318a92567d77 +> Author: Andrew Morton +> Date: Sun Sep 21 01:42:22 2003 -0700 +> +> [PATCH] Speed up direct-io hugetlbpage handling +> +> This patch short-circuits all the direct-io page dirtying logic for +> higher-order pages. Without this, we pointlessly bounce BIOs up to +> keventd all the time. + +In the last twenty years, compound pages have become used for more than +just hugetlb. Rewrite these functions to operate on folios instead +of pages and remove the special case for hugetlbfs; I don't think +it's needed any more (and if it is, we can put it back in as a call +to folio_test_hugetlb()). + +This was found by inspection; as far as I can tell, this bug can lead +to pages used as the destination of a direct I/O read not being marked +as dirty. If those pages are then reclaimed by the MM without being +dirtied for some other reason, they won't be written out. Then when +they're faulted back in, they will not contain the data they should. +It'll take a pretty unusual setup to produce this problem with several +races all going the wrong way. + +This problem predates the folio work; it could for example have been +triggered by mmaping a THP in tmpfs and using that as the target of an +O_DIRECT read. 
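The rewritten __bio_release_pages() walks every page a folio segment covers by byte count; a hedged standalone rendering of that arithmetic, with PAGE_SIZE and the helper name as stand-ins:

  #include <stddef.h>
  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  /* Count the pages a segment at (offset, length) within a folio touches,
   * the way the new loop issues one bio_release_page() per covered page. */
  static unsigned long release_segment(size_t offset, size_t length)
  {
      unsigned long first = offset / PAGE_SIZE;   /* folio_page() index */
      unsigned long page = first;
      size_t done = 0;

      do {
          page++;                 /* kernel: bio_release_page(bio, page++) */
          done += PAGE_SIZE;
      } while (done < length);

      printf("offset %zu length %zu -> pages %lu..%lu\n",
             offset, length, first, page - 1);
      return page - first;
  }

  int main(void)
  {
      release_segment(0, 16384);    /* a 16 KiB segment touches pages 0..3 */
      release_segment(8192, 4096);  /* a 4 KiB segment at page 2 touches only page 2 */
      return 0;
  }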
+ +Fixes: 800d8c63b2e98 ("shmem: add huge pages support") +Cc: +Signed-off-by: Matthew Wilcox (Oracle) +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + block/bio.c | 46 ++++++++++++++++++++++++---------------------- + 1 file changed, 24 insertions(+), 22 deletions(-) + +--- a/block/bio.c ++++ b/block/bio.c +@@ -1145,13 +1145,22 @@ EXPORT_SYMBOL(bio_add_folio); + + void __bio_release_pages(struct bio *bio, bool mark_dirty) + { +- struct bvec_iter_all iter_all; +- struct bio_vec *bvec; ++ struct folio_iter fi; + +- bio_for_each_segment_all(bvec, bio, iter_all) { +- if (mark_dirty && !PageCompound(bvec->bv_page)) +- set_page_dirty_lock(bvec->bv_page); +- bio_release_page(bio, bvec->bv_page); ++ bio_for_each_folio_all(fi, bio) { ++ struct page *page; ++ size_t done = 0; ++ ++ if (mark_dirty) { ++ folio_lock(fi.folio); ++ folio_mark_dirty(fi.folio); ++ folio_unlock(fi.folio); ++ } ++ page = folio_page(fi.folio, fi.offset / PAGE_SIZE); ++ do { ++ bio_release_page(bio, page++); ++ done += PAGE_SIZE; ++ } while (done < fi.length); + } + } + EXPORT_SYMBOL_GPL(__bio_release_pages); +@@ -1439,18 +1448,12 @@ EXPORT_SYMBOL(bio_free_pages); + * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions + * for performing direct-IO in BIOs. + * +- * The problem is that we cannot run set_page_dirty() from interrupt context ++ * The problem is that we cannot run folio_mark_dirty() from interrupt context + * because the required locks are not interrupt-safe. So what we can do is to + * mark the pages dirty _before_ performing IO. And in interrupt context, + * check that the pages are still dirty. If so, fine. If not, redirty them + * in process context. + * +- * We special-case compound pages here: normally this means reads into hugetlb +- * pages. The logic in here doesn't really work right for compound pages +- * because the VM does not uniformly chase down the head page in all cases. +- * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't +- * handle them at all. So we skip compound pages here at an early stage. +- * + * Note that this code is very hard to test under normal circumstances because + * direct-io pins the pages with get_user_pages(). This makes + * is_page_cache_freeable return false, and the VM will not clean the pages. 
+@@ -1466,12 +1469,12 @@ EXPORT_SYMBOL(bio_free_pages); + */ + void bio_set_pages_dirty(struct bio *bio) + { +- struct bio_vec *bvec; +- struct bvec_iter_all iter_all; ++ struct folio_iter fi; + +- bio_for_each_segment_all(bvec, bio, iter_all) { +- if (!PageCompound(bvec->bv_page)) +- set_page_dirty_lock(bvec->bv_page); ++ bio_for_each_folio_all(fi, bio) { ++ folio_lock(fi.folio); ++ folio_mark_dirty(fi.folio); ++ folio_unlock(fi.folio); + } + } + EXPORT_SYMBOL_GPL(bio_set_pages_dirty); +@@ -1515,12 +1518,11 @@ static void bio_dirty_fn(struct work_str + + void bio_check_pages_dirty(struct bio *bio) + { +- struct bio_vec *bvec; ++ struct folio_iter fi; + unsigned long flags; +- struct bvec_iter_all iter_all; + +- bio_for_each_segment_all(bvec, bio, iter_all) { +- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) ++ bio_for_each_folio_all(fi, bio) { ++ if (!folio_test_dirty(fi.folio)) + goto defer; + } + diff --git a/queue-6.6/bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch b/queue-6.6/bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch new file mode 100644 index 00000000000..259a01e3ee6 --- /dev/null +++ b/queue-6.6/bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch @@ -0,0 +1,92 @@ +From da9065caa594d19b26e1a030fd0cc27bd365d685 Mon Sep 17 00:00:00 2001 +From: Gui-Dong Han <2045gemini@gmail.com> +Date: Fri, 22 Dec 2023 23:12:41 +0800 +Subject: Bluetooth: Fix atomicity violation in {min,max}_key_size_set + +From: Gui-Dong Han <2045gemini@gmail.com> + +commit da9065caa594d19b26e1a030fd0cc27bd365d685 upstream. + +In min_key_size_set(): + if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) + return -EINVAL; + hci_dev_lock(hdev); + hdev->le_min_key_size = val; + hci_dev_unlock(hdev); + +In max_key_size_set(): + if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) + return -EINVAL; + hci_dev_lock(hdev); + hdev->le_max_key_size = val; + hci_dev_unlock(hdev); + +The atomicity violation occurs due to concurrent execution of set_min and +set_max funcs.Consider a scenario where setmin writes a new, valid 'min' +value, and concurrently, setmax writes a value that is greater than the +old 'min' but smaller than the new 'min'. In this case, setmax might check +against the old 'min' value (before acquiring the lock) but write its +value after the 'min' has been updated by setmin. This leads to a +situation where the 'max' value ends up being smaller than the 'min' +value, which is an inconsistency. + +This possible bug is found by an experimental static analysis tool +developed by our team, BassCheck[1]. This tool analyzes the locking APIs +to extract function pairs that can be concurrently executed, and then +analyzes the instructions in the paired functions to identify possible +concurrency bugs including data races and atomicity violations. The above +possible bug is reported when our tool analyzes the source code of +Linux 5.17. + +To resolve this issue, it is suggested to encompass the validity checks +within the locked sections in both set_min and set_max funcs. The +modification ensures that the validation of 'val' against the +current min/max values is atomic, thus maintaining the integrity of the +settings. With this patch applied, our tool no longer reports the bug, +with the kernel configuration allyesconfig for x86_64. Due to the lack of +associated hardware, we cannot test the patch in runtime testing, and just +verify it according to the code logic. 
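The locking shape the patch switches to is easy to model in plain pthreads; the constants and globals below are only stand-ins for SMP_MIN_ENC_KEY_SIZE, SMP_MAX_ENC_KEY_SIZE and hdev->le_{min,max}_key_size:

  #include <pthread.h>
  #include <stdio.h>

  #define MIN_KEY  7    /* stand-in for SMP_MIN_ENC_KEY_SIZE */
  #define MAX_KEY 16    /* stand-in for SMP_MAX_ENC_KEY_SIZE */

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static int le_min_key_size = MIN_KEY;
  static int le_max_key_size = MAX_KEY;

  static int min_key_size_set(int val)
  {
      int ret = 0;

      pthread_mutex_lock(&lock);
      if (val > le_max_key_size || val < MIN_KEY)
          ret = -1;            /* -EINVAL, decided under the lock */
      else
          le_min_key_size = val;
      pthread_mutex_unlock(&lock);
      return ret;
  }

  static int max_key_size_set(int val)
  {
      int ret = 0;

      pthread_mutex_lock(&lock);
      if (val > MAX_KEY || val < le_min_key_size)
          ret = -1;            /* compares against the current min, never a stale one */
      else
          le_max_key_size = val;
      pthread_mutex_unlock(&lock);
      return ret;
  }

  int main(void)
  {
      int r1 = min_key_size_set(10);
      int r2 = max_key_size_set(8);     /* rejected: 8 is below the new min */

      printf("set_min(10)=%d set_max(8)=%d -> min=%d max=%d\n",
             r1, r2, le_min_key_size, le_max_key_size);
      return 0;
  }

Because each setter now checks and writes under the same lock, the interleaving described above can no longer leave the max below the min.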
+ +[1] https://sites.google.com/view/basscheck/ + +Fixes: 18f81241b74f ("Bluetooth: Move {min,max}_key_size debugfs ...") +Cc: stable@vger.kernel.org +Signed-off-by: Gui-Dong Han <2045gemini@gmail.com> +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Greg Kroah-Hartman +--- + net/bluetooth/hci_debugfs.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/net/bluetooth/hci_debugfs.c ++++ b/net/bluetooth/hci_debugfs.c +@@ -1046,10 +1046,12 @@ static int min_key_size_set(void *data, + { + struct hci_dev *hdev = data; + +- if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) ++ hci_dev_lock(hdev); ++ if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) { ++ hci_dev_unlock(hdev); + return -EINVAL; ++ } + +- hci_dev_lock(hdev); + hdev->le_min_key_size = val; + hci_dev_unlock(hdev); + +@@ -1074,10 +1076,12 @@ static int max_key_size_set(void *data, + { + struct hci_dev *hdev = data; + +- if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) ++ hci_dev_lock(hdev); ++ if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) { ++ hci_dev_unlock(hdev); + return -EINVAL; ++ } + +- hci_dev_lock(hdev); + hdev->le_max_key_size = val; + hci_dev_unlock(hdev); + diff --git a/queue-6.6/bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch b/queue-6.6/bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch new file mode 100644 index 00000000000..00d2e42026c --- /dev/null +++ b/queue-6.6/bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch @@ -0,0 +1,79 @@ +From 715d82ba636cb3629a6e18a33bb9dbe53f9936ee Mon Sep 17 00:00:00 2001 +From: Jiri Olsa +Date: Wed, 3 Jan 2024 20:05:46 +0100 +Subject: bpf: Fix re-attachment branch in bpf_tracing_prog_attach + +From: Jiri Olsa + +commit 715d82ba636cb3629a6e18a33bb9dbe53f9936ee upstream. + +The following case can cause a crash due to missing attach_btf: + +1) load rawtp program +2) load fentry program with rawtp as target_fd +3) create tracing link for fentry program with target_fd = 0 +4) repeat 3 + +In the end we have: + +- prog->aux->dst_trampoline == NULL +- tgt_prog == NULL (because we did not provide target_fd to link_create) +- prog->aux->attach_btf == NULL (the program was loaded with attach_prog_fd=X) +- the program was loaded for tgt_prog but we have no way to find out which one + + BUG: kernel NULL pointer dereference, address: 0000000000000058 + Call Trace: + + ? __die+0x20/0x70 + ? page_fault_oops+0x15b/0x430 + ? fixup_exception+0x22/0x330 + ? exc_page_fault+0x6f/0x170 + ? asm_exc_page_fault+0x22/0x30 + ? bpf_tracing_prog_attach+0x279/0x560 + ? btf_obj_id+0x5/0x10 + bpf_tracing_prog_attach+0x439/0x560 + __sys_bpf+0x1cf4/0x2de0 + __x64_sys_bpf+0x1c/0x30 + do_syscall_64+0x41/0xf0 + entry_SYSCALL_64_after_hwframe+0x6e/0x76 + +Return -EINVAL in this situation. + +Fixes: f3a95075549e0 ("bpf: Allow trampoline re-attach for tracing and lsm programs") +Cc: stable@vger.kernel.org +Signed-off-by: Jiri Olsa +Acked-by: Jiri Olsa +Acked-by: Song Liu +Signed-off-by: Dmitrii Dolgov <9erthalion6@gmail.com> +Link: https://lore.kernel.org/r/20240103190559.14750-4-9erthalion6@gmail.com +Signed-off-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/syscall.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -3197,6 +3197,10 @@ static int bpf_tracing_prog_attach(struc + * + * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program + * was detached and is going for re-attachment. 
++ * ++ * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf ++ * are NULL, then program was already attached and user did not provide ++ * tgt_prog_fd so we have no way to find out or create trampoline + */ + if (!prog->aux->dst_trampoline && !tgt_prog) { + /* +@@ -3210,6 +3214,11 @@ static int bpf_tracing_prog_attach(struc + err = -EINVAL; + goto out_unlock; + } ++ /* We can allow re-attach only if we have valid attach_btf. */ ++ if (!prog->aux->attach_btf) { ++ err = -EINVAL; ++ goto out_unlock; ++ } + btf_id = prog->aux->attach_btf_id; + key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); + } diff --git a/queue-6.6/drm-amd-enable-pcie-pme-from-d3.patch b/queue-6.6/drm-amd-enable-pcie-pme-from-d3.patch new file mode 100644 index 00000000000..e515d89ac5c --- /dev/null +++ b/queue-6.6/drm-amd-enable-pcie-pme-from-d3.patch @@ -0,0 +1,36 @@ +From bd1f6a31e7762ebc99b97f3eda5e5ea3708fa792 Mon Sep 17 00:00:00 2001 +From: Mario Limonciello +Date: Fri, 24 Nov 2023 09:56:32 -0600 +Subject: drm/amd: Enable PCIe PME from D3 + +From: Mario Limonciello + +commit bd1f6a31e7762ebc99b97f3eda5e5ea3708fa792 upstream. + +When dGPU is put into BOCO it may be in D3cold but still able send +PME on display hotplug event. For this to work it must be enabled +as wake source from D3. + +When runpm is enabled use pci_wake_from_d3() to mark wakeup as +enabled by default. + +Cc: stable@vger.kernel.org # 6.1+ +Signed-off-by: Mario Limonciello +Acked-by: Alex Deucher +Signed-off-by: Alex Deucher +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -2197,6 +2197,8 @@ retry_init: + + pci_wake_from_d3(pdev, TRUE); + ++ pci_wake_from_d3(pdev, TRUE); ++ + /* + * For runpm implemented via BACO, PMFW will handle the + * timing for BACO in and out: diff --git a/queue-6.6/fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch b/queue-6.6/fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch new file mode 100644 index 00000000000..ff3002e4566 --- /dev/null +++ b/queue-6.6/fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch @@ -0,0 +1,38 @@ +From b48807788e7a2bd93044fe84cfe8ff64b85ec15e Mon Sep 17 00:00:00 2001 +From: Thomas Zimmermann +Date: Mon, 27 Nov 2023 14:15:30 +0100 +Subject: fbdev/acornfb: Fix name of fb_ops initializer macro + +From: Thomas Zimmermann + +commit b48807788e7a2bd93044fe84cfe8ff64b85ec15e upstream. + +Fix build by using the correct name for the initializer macro +for struct fb_ops. 
+ +Signed-off-by: Thomas Zimmermann +Fixes: 9037afde8b9d ("fbdev/acornfb: Use fbdev I/O helpers") +Cc: Thomas Zimmermann +Cc: Sam Ravnborg +Cc: Helge Deller +Cc: Javier Martinez Canillas +Cc: Arnd Bergmann +Cc: # v6.6+ +Reviewed-by: Javier Martinez Canillas +Link: https://patchwork.freedesktop.org/patch/msgid/20231127131655.4020-2-tzimmermann@suse.de +Signed-off-by: Greg Kroah-Hartman +--- + drivers/video/fbdev/acornfb.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/video/fbdev/acornfb.c ++++ b/drivers/video/fbdev/acornfb.c +@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screen + + static const struct fb_ops acornfb_ops = { + .owner = THIS_MODULE, +- FB_IOMEM_DEFAULT_OPS, ++ FB_DEFAULT_IOMEM_OPS, + .fb_check_var = acornfb_check_var, + .fb_set_par = acornfb_set_par, + .fb_setcolreg = acornfb_setcolreg, diff --git a/queue-6.6/fbdev-flush-deferred-io-before-closing.patch b/queue-6.6/fbdev-flush-deferred-io-before-closing.patch new file mode 100644 index 00000000000..a731b16a513 --- /dev/null +++ b/queue-6.6/fbdev-flush-deferred-io-before-closing.patch @@ -0,0 +1,46 @@ +From 33cd6ea9c0673517cdb06ad5c915c6f22e9615fc Mon Sep 17 00:00:00 2001 +From: Nam Cao +Date: Mon, 18 Dec 2023 10:57:31 +0100 +Subject: fbdev: flush deferred IO before closing + +From: Nam Cao + +commit 33cd6ea9c0673517cdb06ad5c915c6f22e9615fc upstream. + +When framebuffer gets closed, the queued deferred IO gets cancelled. This +can cause some last display data to vanish. This is problematic for users +who send a still image to the framebuffer, then close the file: the image +may never appear. + +To ensure none of display data get lost, flush the queued deferred IO +first before closing. + +Another possible solution is to delete the cancel_delayed_work_sync() +instead. The difference is that the display may appear some time after +closing. However, the clearing of page mapping after this needs to be +removed too, because the page mapping is used by the deferred work. It is +not completely obvious whether it is okay to not clear the page mapping. +For a patch intended for stable trees, go with the simple and obvious +solution. + +Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support") +Cc: stable@vger.kernel.org +Signed-off-by: Nam Cao +Reviewed-by: Sebastian Andrzej Siewior +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/video/fbdev/core/fb_defio.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/video/fbdev/core/fb_defio.c ++++ b/drivers/video/fbdev/core/fb_defio.c +@@ -313,7 +313,7 @@ static void fb_deferred_io_lastclose(str + struct page *page; + int i; + +- cancel_delayed_work_sync(&info->deferred_work); ++ flush_delayed_work(&info->deferred_work); + + /* clear out the mapping that we setup */ + for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { diff --git a/queue-6.6/fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch b/queue-6.6/fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch new file mode 100644 index 00000000000..443a2013c05 --- /dev/null +++ b/queue-6.6/fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch @@ -0,0 +1,51 @@ +From 15e4c1f462279b4e128f27de48133e0debe9e0df Mon Sep 17 00:00:00 2001 +From: Nam Cao +Date: Mon, 18 Dec 2023 10:57:30 +0100 +Subject: fbdev: flush deferred work in fb_deferred_io_fsync() + +From: Nam Cao + +commit 15e4c1f462279b4e128f27de48133e0debe9e0df upstream. + +The driver's fsync() is supposed to flush any pending operation to +hardware. 
It is implemented in this driver by cancelling the queued +deferred IO first, then schedule it for "immediate execution" by calling +schedule_delayed_work() again with delay=0. However, setting delay=0 +only means the work is scheduled immediately, it does not mean the work +is executed immediately. There is no guarantee that the work is finished +after schedule_delayed_work() returns. After this driver's fsync() +returns, there can still be pending work. Furthermore, if close() is +called by users immediately after fsync(), the pending work gets +cancelled and fsync() may do nothing. + +To ensure that the deferred IO completes, use flush_delayed_work() +instead. Write operations to this driver either write to the device +directly, or invoke schedule_delayed_work(); so by flushing the +workqueue, it can be guaranteed that all previous writes make it to the +device. + +Fixes: 5e841b88d23d ("fb: fsync() method for deferred I/O flush.") +Cc: stable@vger.kernel.org +Signed-off-by: Nam Cao +Reviewed-by: Sebastian Andrzej Siewior +Signed-off-by: Helge Deller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/video/fbdev/core/fb_defio.c | 6 +----- + 1 file changed, 1 insertion(+), 5 deletions(-) + +--- a/drivers/video/fbdev/core/fb_defio.c ++++ b/drivers/video/fbdev/core/fb_defio.c +@@ -132,11 +132,7 @@ int fb_deferred_io_fsync(struct file *fi + return 0; + + inode_lock(inode); +- /* Kill off the delayed work */ +- cancel_delayed_work_sync(&info->deferred_work); +- +- /* Run it immediately */ +- schedule_delayed_work(&info->deferred_work, 0); ++ flush_delayed_work(&info->deferred_work); + inode_unlock(inode); + + return 0; diff --git a/queue-6.6/io_uring-don-t-check-iopoll-if-request-completes.patch b/queue-6.6/io_uring-don-t-check-iopoll-if-request-completes.patch new file mode 100644 index 00000000000..62a05d2c59b --- /dev/null +++ b/queue-6.6/io_uring-don-t-check-iopoll-if-request-completes.patch @@ -0,0 +1,39 @@ +From 9b43ef3d52532a0175ed6654618f7db61d390d2e Mon Sep 17 00:00:00 2001 +From: Pavel Begunkov +Date: Fri, 1 Dec 2023 00:38:52 +0000 +Subject: io_uring: don't check iopoll if request completes + +From: Pavel Begunkov + +commit 9b43ef3d52532a0175ed6654618f7db61d390d2e upstream. + +IOPOLL request should never return IOU_OK, so the following iopoll +queueing check in io_issue_sqe() after getting IOU_OK doesn't make any +sense as would never turn true. Let's optimise on that and return a bit +earlier. It's also much more resilient to potential bugs from +mischieving iopoll implementations. 
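The control-flow change amounts to returning as soon as a completed request is seen, instead of falling through to the polling bookkeeping; a rough sketch with stand-in result codes and names (not the real io_uring values):

  #include <stdio.h>

  enum { RES_OK = 0, RES_SKIP_COMPLETE = -2 };   /* stand-ins for IOU_OK / IOU_ISSUE_SKIP_COMPLETE */

  static int issue(int ret, int *armed_for_poll)
  {
      if (ret == RES_OK) {
          /* ... complete the request here ... */
          return 0;            /* early return: the iopoll path can never apply */
      }

      if (ret != RES_SKIP_COMPLETE)
          return ret;          /* plain error, propagate */

      *armed_for_poll = 1;     /* only genuinely in-flight requests reach this */
      return 0;
  }

  int main(void)
  {
      int armed = 0;
      int ret;

      ret = issue(RES_OK, &armed);
      printf("OK   -> %d, armed=%d\n", ret, armed);   /* 0, armed stays 0 */
      ret = issue(RES_SKIP_COMPLETE, &armed);
      printf("SKIP -> %d, armed=%d\n", ret, armed);   /* 0, armed becomes 1 */
      return 0;
  }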
+ +Cc: +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/2f8690e2fa5213a2ff292fac29a7143c036cdd60.1701390926.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + io_uring/io_uring.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -1891,7 +1891,11 @@ static int io_issue_sqe(struct io_kiocb + io_req_complete_defer(req); + else + io_req_complete_post(req, issue_flags); +- } else if (ret != IOU_ISSUE_SKIP_COMPLETE) ++ ++ return 0; ++ } ++ ++ if (ret != IOU_ISSUE_SKIP_COMPLETE) + return ret; + + /* If the op doesn't have a file, we're not polling for it */ diff --git a/queue-6.6/io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch b/queue-6.6/io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch new file mode 100644 index 00000000000..953ecb89091 --- /dev/null +++ b/queue-6.6/io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch @@ -0,0 +1,55 @@ +From 6ff1407e24e6fdfa4a16ba9ba551e3d253a26391 Mon Sep 17 00:00:00 2001 +From: Jens Axboe +Date: Thu, 4 Jan 2024 12:21:08 -0700 +Subject: io_uring: ensure local task_work is run on wait timeout + +From: Jens Axboe + +commit 6ff1407e24e6fdfa4a16ba9ba551e3d253a26391 upstream. + +A previous commit added an earlier break condition here, which is fine if +we're using non-local task_work as it'll be run on return to userspace. +However, if DEFER_TASKRUN is used, then we could be leaving local +task_work that is ready to process in the ctx list until next time that +we enter the kernel to wait for events. + +Move the break condition to _after_ we have run task_work. + +Cc: stable@vger.kernel.org +Fixes: 846072f16eed ("io_uring: mimimise io_cqring_wait_schedule") +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + io_uring/io_uring.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -2630,8 +2630,6 @@ static int io_cqring_wait(struct io_ring + __set_current_state(TASK_RUNNING); + atomic_set(&ctx->cq_wait_nr, 0); + +- if (ret < 0) +- break; + /* + * Run task_work after scheduling and before io_should_wake(). + * If we got woken because of task_work being processed, run it +@@ -2641,6 +2639,18 @@ static int io_cqring_wait(struct io_ring + if (!llist_empty(&ctx->work_llist)) + io_run_local_work(ctx); + ++ /* ++ * Non-local task_work will be run on exit to userspace, but ++ * if we're using DEFER_TASKRUN, then we could have waited ++ * with a timeout for a number of requests. If the timeout ++ * hits, we could have some requests ready to process. Ensure ++ * this break is _after_ we have run task_work, to avoid ++ * deferring running potentially pending requests until the ++ * next time we wait for events. 
++ */ ++ if (ret < 0) ++ break; ++ + check_cq = READ_ONCE(ctx->check_cq); + if (unlikely(check_cq)) { + /* let the caller flush overflows, retry */ diff --git a/queue-6.6/io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch b/queue-6.6/io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch new file mode 100644 index 00000000000..e1157b68aa2 --- /dev/null +++ b/queue-6.6/io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch @@ -0,0 +1,50 @@ +From 0a535eddbe0dc1de4386046ab849f08aeb2f8faf Mon Sep 17 00:00:00 2001 +From: Jens Axboe +Date: Thu, 21 Dec 2023 08:49:18 -0700 +Subject: io_uring/rw: ensure io->bytes_done is always initialized + +From: Jens Axboe + +commit 0a535eddbe0dc1de4386046ab849f08aeb2f8faf upstream. + +If IOSQE_ASYNC is set and we fail importing an iovec for a readv or +writev request, then we leave ->bytes_done uninitialized and hence the +eventual failure CQE posted can potentially have a random res value +rather than the expected -EINVAL. + +Setup ->bytes_done before potentially failing, so we have a consistent +value if we fail the request early. + +Cc: stable@vger.kernel.org +Reported-by: xingwei lee +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + io_uring/rw.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/io_uring/rw.c ++++ b/io_uring/rw.c +@@ -549,15 +549,19 @@ static inline int io_rw_prep_async(struc + struct iovec *iov; + int ret; + ++ iorw->bytes_done = 0; ++ iorw->free_iovec = NULL; ++ + /* submission path, ->uring_lock should already be taken */ + ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); + if (unlikely(ret < 0)) + return ret; + +- iorw->bytes_done = 0; +- iorw->free_iovec = iov; +- if (iov) ++ if (iov) { ++ iorw->free_iovec = iov; + req->flags |= REQ_F_NEED_CLEANUP; ++ } ++ + return 0; + } + diff --git a/queue-6.6/md-bypass-block-throttle-for-superblock-update.patch b/queue-6.6/md-bypass-block-throttle-for-superblock-update.patch new file mode 100644 index 00000000000..4eb2df7c07b --- /dev/null +++ b/queue-6.6/md-bypass-block-throttle-for-superblock-update.patch @@ -0,0 +1,43 @@ +From d6e035aad6c09991da1c667fb83419329a3baed8 Mon Sep 17 00:00:00 2001 +From: Junxiao Bi +Date: Wed, 8 Nov 2023 10:22:15 -0800 +Subject: md: bypass block throttle for superblock update + +From: Junxiao Bi + +commit d6e035aad6c09991da1c667fb83419329a3baed8 upstream. + +commit 5e2cf333b7bd ("md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d") +introduced a hung bug and will be reverted in next patch, since the issue +that commit is fixing is due to md superblock write is throttled by wbt, +to fix it, we can have superblock write bypass block layer throttle. + +Fixes: 5e2cf333b7bd ("md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d") +Cc: stable@vger.kernel.org # v5.19+ +Suggested-by: Yu Kuai +Signed-off-by: Junxiao Bi +Reviewed-by: Logan Gunthorpe +Reviewed-by: Yu Kuai +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20231108182216.73611-1-junxiao.bi@oracle.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/md/md.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -940,9 +940,10 @@ void md_super_write(struct mddev *mddev, + return; + + bio = bio_alloc_bioset(rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev, +- 1, +- REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, +- GFP_NOIO, &mddev->sync_set); ++ 1, ++ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META ++ | REQ_PREFLUSH | REQ_FUA, ++ GFP_NOIO, &mddev->sync_set); + + atomic_inc(&rdev->nr_pending); + diff --git a/queue-6.6/md-raid1-use-blk_opf_t-for-read-and-write-operations.patch b/queue-6.6/md-raid1-use-blk_opf_t-for-read-and-write-operations.patch new file mode 100644 index 00000000000..bc93c90ceca --- /dev/null +++ b/queue-6.6/md-raid1-use-blk_opf_t-for-read-and-write-operations.patch @@ -0,0 +1,84 @@ +From 7dab24554dedd4e6f408af8eb2d25c89997a6a1f Mon Sep 17 00:00:00 2001 +From: Bart Van Assche +Date: Sun, 7 Jan 2024 16:12:23 -0800 +Subject: md/raid1: Use blk_opf_t for read and write operations + +From: Bart Van Assche + +commit 7dab24554dedd4e6f408af8eb2d25c89997a6a1f upstream. + +Use the type blk_opf_t for read and write operations instead of int. This +patch does not affect the generated code but fixes the following sparse +warning: + +drivers/md/raid1.c:1993:60: sparse: sparse: incorrect type in argument 5 (different base types) + expected restricted blk_opf_t [usertype] opf + got int rw + +Cc: Song Liu +Cc: Jens Axboe +Fixes: 3c5e514db58f ("md/raid1: Use the new blk_opf_t type") +Cc: stable@vger.kernel.org # v6.0+ +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202401080657.UjFnvQgX-lkp@intel.com/ +Signed-off-by: Bart Van Assche +Signed-off-by: Song Liu +Link: https://lore.kernel.org/r/20240108001223.23835-1-bvanassche@acm.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/md/raid1.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1983,12 +1983,12 @@ static void end_sync_write(struct bio *b + } + + static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, +- int sectors, struct page *page, int rw) ++ int sectors, struct page *page, blk_opf_t rw) + { + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + /* success */ + return 1; +- if (rw == WRITE) { ++ if (rw == REQ_OP_WRITE) { + set_bit(WriteErrorSeen, &rdev->flags); + if (!test_and_set_bit(WantReplacement, + &rdev->flags)) +@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1 + rdev = conf->mirrors[d].rdev; + if (r1_sync_page_io(rdev, sect, s, + pages[idx], +- WRITE) == 0) { ++ REQ_OP_WRITE) == 0) { + r1_bio->bios[d]->bi_end_io = NULL; + rdev_dec_pending(rdev, mddev); + } +@@ -2120,7 +2120,7 @@ static int fix_sync_read_error(struct r1 + rdev = conf->mirrors[d].rdev; + if (r1_sync_page_io(rdev, sect, s, + pages[idx], +- READ) != 0) ++ REQ_OP_READ) != 0) + atomic_add(s, &rdev->corrected_errors); + } + sectors -= s; +@@ -2332,7 +2332,7 @@ static void fix_read_error(struct r1conf + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + r1_sync_page_io(rdev, sect, s, +- conf->tmppage, WRITE); ++ conf->tmppage, REQ_OP_WRITE); + rdev_dec_pending(rdev, mddev); + } else + rcu_read_unlock(); +@@ -2349,7 +2349,7 @@ static void fix_read_error(struct r1conf + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + if (r1_sync_page_io(rdev, sect, s, +- conf->tmppage, READ)) { ++ conf->tmppage, REQ_OP_READ)) { + atomic_add(s, &rdev->corrected_errors); + pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n", + mdname(mddev), s, diff --git a/queue-6.6/netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch 
b/queue-6.6/netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch new file mode 100644 index 00000000000..41946806158 --- /dev/null +++ b/queue-6.6/netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch @@ -0,0 +1,35 @@ +From b1db244ffd041a49ecc9618e8feb6b5c1afcdaa7 Mon Sep 17 00:00:00 2001 +From: Pablo Neira Ayuso +Date: Fri, 12 Jan 2024 23:28:45 +0100 +Subject: netfilter: nf_tables: check if catch-all set element is active in next generation + +From: Pablo Neira Ayuso + +commit b1db244ffd041a49ecc9618e8feb6b5c1afcdaa7 upstream. + +When deactivating the catch-all set element, check the state in the next +generation that represents this transaction. + +This bug uncovered after the recent removal of the element busy mark +a2dd0233cbc4 ("netfilter: nf_tables: remove busy mark and gc batch API"). + +Fixes: aaa31047a6d2 ("netfilter: nftables: add catch-all set element support") +Cc: stable@vger.kernel.org +Reported-by: lonial con +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + net/netfilter/nf_tables_api.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -6431,7 +6431,7 @@ static int nft_setelem_catchall_deactiva + + list_for_each_entry(catchall, &set->catchall_list, list) { + ext = nft_set_elem_ext(set, catchall->elem); +- if (!nft_is_active(net, ext)) ++ if (!nft_is_active_next(net, ext)) + continue; + + kfree(elem->priv); diff --git a/queue-6.6/pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch b/queue-6.6/pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch new file mode 100644 index 00000000000..d47279375af --- /dev/null +++ b/queue-6.6/pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch @@ -0,0 +1,35 @@ +From a297d07b9a1e4fb8cda25a4a2363a507d294b7c9 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= +Date: Tue, 9 Jan 2024 22:34:31 +0100 +Subject: pwm: Fix out-of-bounds access in of_pwm_single_xlate() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Uwe Kleine-König + +commit a297d07b9a1e4fb8cda25a4a2363a507d294b7c9 upstream. + +With args->args_count == 2 args->args[2] is not defined. Actually the +flags are contained in args->args[1]. 
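Written out with the cell layout, the off-by-one is plain to see; the struct, flag value and helper below are invented stand-ins for the real of_phandle_args handling:

  #include <stdio.h>

  #define PWM_POLARITY_INVERTED (1u << 0)   /* illustrative flag value */

  struct xlate_args {
      unsigned int args[2];   /* args_count == 2: args[0] = period, args[1] = flags */
      int args_count;
  };

  static int polarity_inverted(const struct xlate_args *a)
  {
      /* The flags are the second (last) cell, args[1]; args[2] is past the end. */
      return a->args_count == 2 && (a->args[1] & PWM_POLARITY_INVERTED);
  }

  int main(void)
  {
      struct xlate_args a = {
          .args = { 1000000, PWM_POLARITY_INVERTED },
          .args_count = 2,
      };

      printf("inverted polarity: %d\n", polarity_inverted(&a));   /* prints 1 */
      return 0;
  }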
+ +Fixes: 3ab7b6ac5d82 ("pwm: Introduce single-PWM of_xlate function") +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/243908750d306e018a3d4bf2eb745d53ab50f663.1704835845.git.u.kleine-koenig@pengutronix.de +Signed-off-by: Uwe Kleine-König +Signed-off-by: Greg Kroah-Hartman +--- + drivers/pwm/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pwm/core.c ++++ b/drivers/pwm/core.c +@@ -176,7 +176,7 @@ of_pwm_single_xlate(struct pwm_chip *chi + pwm->args.period = args->args[0]; + pwm->args.polarity = PWM_POLARITY_NORMAL; + +- if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED) ++ if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED) + pwm->args.polarity = PWM_POLARITY_INVERSED; + + return pwm; diff --git a/queue-6.6/pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch b/queue-6.6/pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch new file mode 100644 index 00000000000..00e92605812 --- /dev/null +++ b/queue-6.6/pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch @@ -0,0 +1,47 @@ +From 9320fc509b87b4d795fb37112931e2f4f8b5c55f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= +Date: Sat, 6 Jan 2024 15:13:03 +0100 +Subject: pwm: jz4740: Don't use dev_err_probe() in .request() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Uwe Kleine-König + +commit 9320fc509b87b4d795fb37112931e2f4f8b5c55f upstream. + +dev_err_probe() is only supposed to be used in probe functions. While it +probably doesn't hurt, both the EPROBE_DEFER handling and calling +device_set_deferred_probe_reason() are conceptually wrong in the request +callback. So replace the call by dev_err() and a separate return +statement. + +This effectively reverts commit c0bfe9606e03 ("pwm: jz4740: Simplify +with dev_err_probe()"). + +Reviewed-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20240106141302.1253365-2-u.kleine-koenig@pengutronix.de +Fixes: c0bfe9606e03 ("pwm: jz4740: Simplify with dev_err_probe()") +Cc: stable@vger.kernel.org +Signed-off-by: Uwe Kleine-König +Signed-off-by: Greg Kroah-Hartman +--- + drivers/pwm/pwm-jz4740.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/pwm/pwm-jz4740.c ++++ b/drivers/pwm/pwm-jz4740.c +@@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm + snprintf(name, sizeof(name), "timer%u", pwm->hwpwm); + + clk = clk_get(chip->dev, name); +- if (IS_ERR(clk)) +- return dev_err_probe(chip->dev, PTR_ERR(clk), +- "Failed to get clock\n"); ++ if (IS_ERR(clk)) { ++ dev_err(chip->dev, "error %pe: Failed to get clock\n", clk); ++ return PTR_ERR(clk); ++ } + + err = clk_prepare_enable(clk); + if (err < 0) { diff --git a/queue-6.6/rootfs-fix-support-for-rootfstype-when-root-is-given.patch b/queue-6.6/rootfs-fix-support-for-rootfstype-when-root-is-given.patch new file mode 100644 index 00000000000..2ab6c940938 --- /dev/null +++ b/queue-6.6/rootfs-fix-support-for-rootfstype-when-root-is-given.patch @@ -0,0 +1,70 @@ +From 21528c69a0d8483f7c6345b1a0bc8d8975e9a172 Mon Sep 17 00:00:00 2001 +From: Stefan Berger +Date: Sun, 19 Nov 2023 20:12:48 -0500 +Subject: rootfs: Fix support for rootfstype= when root= is given + +From: Stefan Berger + +commit 21528c69a0d8483f7c6345b1a0bc8d8975e9a172 upstream. + +Documentation/filesystems/ramfs-rootfs-initramfs.rst states: + + If CONFIG_TMPFS is enabled, rootfs will use tmpfs instead of ramfs by + default. To force ramfs, add "rootfstype=ramfs" to the kernel command + line. 
+ +This currently does not work when root= is provided since then +saved_root_name contains a string and rootfstype= is ignored. Therefore, +ramfs is currently always chosen when root= is provided. + +The current behavior for rootfs's filesystem is: + + root= | rootfstype= | chosen rootfs filesystem + ------------+-------------+-------------------------- + unspecified | unspecified | tmpfs + unspecified | tmpfs | tmpfs + unspecified | ramfs | ramfs + provided | ignored | ramfs + +rootfstype= should be respected regardless whether root= is given, +as shown below: + + root= | rootfstype= | chosen rootfs filesystem + ------------+-------------+-------------------------- + unspecified | unspecified | tmpfs (as before) + unspecified | tmpfs | tmpfs (as before) + unspecified | ramfs | ramfs (as before) + provided | unspecified | ramfs (compatibility with before) + provided | tmpfs | tmpfs (new) + provided | ramfs | ramfs (new) + +This table represents the new behavior. + +Fixes: 6e19eded3684 ("initmpfs: use initramfs if rootfstype= or root= specified") +Cc: +Signed-off-by: Rob Landley +Link: https://lore.kernel.org/lkml/8244c75f-445e-b15b-9dbf-266e7ca666e2@landley.net/ +Reviewed-and-Tested-by: Mimi Zohar +Signed-off-by: Stefan Berger +Link: https://lore.kernel.org/r/20231120011248.396012-1-stefanb@linux.ibm.com +Signed-off-by: Greg Kroah-Hartman +--- + init/do_mounts.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -510,7 +510,10 @@ struct file_system_type rootfs_fs_type = + + void __init init_rootfs(void) + { +- if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] && +- (!root_fs_names || strstr(root_fs_names, "tmpfs"))) +- is_tmpfs = true; ++ if (IS_ENABLED(CONFIG_TMPFS)) { ++ if (!saved_root_name[0] && !root_fs_names) ++ is_tmpfs = true; ++ else if (root_fs_names && !!strstr(root_fs_names, "tmpfs")) ++ is_tmpfs = true; ++ } + } diff --git a/queue-6.6/scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch b/queue-6.6/scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch new file mode 100644 index 00000000000..ba0e5bff969 --- /dev/null +++ b/queue-6.6/scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch @@ -0,0 +1,49 @@ +From f8fb3f39148e8010479e4b2003ba4728818ec661 Mon Sep 17 00:00:00 2001 +From: Chandrakanth patil +Date: Sun, 26 Nov 2023 11:01:33 +0530 +Subject: scsi: mpi3mr: Block PEL Enable Command on Controller Reset and Unrecoverable State + +From: Chandrakanth patil + +commit f8fb3f39148e8010479e4b2003ba4728818ec661 upstream. + +If a controller reset is underway or the controller is in an unrecoverable +state, the PEL enable management command will be returned as EAGAIN or +EFAULT. + +Cc: # v6.1+ +Co-developed-by: Sathya Prakash +Signed-off-by: Sathya Prakash +Signed-off-by: Chandrakanth patil +Link: https://lore.kernel.org/r/20231126053134.10133-4-chandrakanth.patil@broadcom.com +Signed-off-by: Martin K. 
Petersen +Signed-off-by: Greg Kroah-Hartman +--- + drivers/scsi/mpi3mr/mpi3mr_app.c | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +--- a/drivers/scsi/mpi3mr/mpi3mr_app.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c +@@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct + return rval; + } + ++ if (mrioc->unrecoverable) { ++ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", ++ __func__); ++ return -EFAULT; ++ } ++ ++ if (mrioc->reset_in_progress) { ++ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); ++ return -EAGAIN; ++ } ++ ++ if (mrioc->stop_bsgs) { ++ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); ++ return -EAGAIN; ++ } ++ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + &pel_enable, sizeof(pel_enable)); diff --git a/queue-6.6/scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch b/queue-6.6/scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch new file mode 100644 index 00000000000..e32ae41c426 --- /dev/null +++ b/queue-6.6/scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch @@ -0,0 +1,53 @@ +From c01d515687e358b22aa8414d6dac60d7defa6eb9 Mon Sep 17 00:00:00 2001 +From: Chandrakanth patil +Date: Sun, 26 Nov 2023 11:01:32 +0530 +Subject: scsi: mpi3mr: Clean up block devices post controller reset + +From: Chandrakanth patil + +commit c01d515687e358b22aa8414d6dac60d7defa6eb9 upstream. + +After a controller reset, if the firmware changes the state of devices to +"hide", then remove those devices from the OS. + +Cc: # v6.6+ +Co-developed-by: Sathya Prakash +Signed-off-by: Sathya Prakash +Signed-off-by: Chandrakanth patil +Link: https://lore.kernel.org/r/20231126053134.10133-3-chandrakanth.patil@broadcom.com +Signed-off-by: Martin K. Petersen +Signed-off-by: Greg Kroah-Hartman +--- + drivers/scsi/mpi3mr/mpi3mr_os.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c +index 561fe0857bc0..872d4b809d08 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_os.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c +@@ -1047,8 +1047,9 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) + list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, + list) { + if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) && +- tgtdev->host_exposed && tgtdev->starget && +- tgtdev->starget->hostdata) { ++ tgtdev->is_hidden && ++ tgtdev->host_exposed && tgtdev->starget && ++ tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + tgt_priv->dev_removed = 1; + atomic_set(&tgt_priv->block_io, 0); +@@ -1064,6 +1065,10 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); + mpi3mr_tgtdev_put(tgtdev); ++ } else if (tgtdev->is_hidden & tgtdev->host_exposed) { ++ dprint_reset(mrioc, "hiding target device with perst_id(%d)\n", ++ tgtdev->perst_id); ++ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + } + } + +-- +2.43.0 + diff --git a/queue-6.6/scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch b/queue-6.6/scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch new file mode 100644 index 00000000000..3880ce49816 --- /dev/null +++ b/queue-6.6/scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch @@ -0,0 +1,42 @@ +From e5aab848dfdf7996d20ece4d28d2733c732c5e5a Mon Sep 17 00:00:00 2001 +From: Chandrakanth patil +Date: Sun, 26 Nov 2023 11:01:31 +0530 +Subject: scsi: mpi3mr: Refresh sdev 
queue depth after controller reset + +From: Chandrakanth patil + +commit e5aab848dfdf7996d20ece4d28d2733c732c5e5a upstream. + +After a controller reset, the firmware may modify the device queue depth. +Therefore, update the device queue depth accordingly. + +Cc: # v5.15+ +Co-developed-by: Sathya Prakash +Signed-off-by: Sathya Prakash +Signed-off-by: Chandrakanth patil +Link: https://lore.kernel.org/r/20231126053134.10133-2-chandrakanth.patil@broadcom.com +Signed-off-by: Martin K. Petersen +Signed-off-by: Greg Kroah-Hartman +--- + drivers/scsi/mpi3mr/mpi3mr_os.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/mpi3mr/mpi3mr_os.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c +@@ -1070,8 +1070,14 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr + tgtdev = NULL; + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && +- !tgtdev->is_hidden && !tgtdev->host_exposed) +- mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); ++ !tgtdev->is_hidden) { ++ if (!tgtdev->host_exposed) ++ mpi3mr_report_tgtdev_to_host(mrioc, ++ tgtdev->perst_id); ++ else if (tgtdev->starget) ++ starget_for_each_device(tgtdev->starget, ++ (void *)tgtdev, mpi3mr_update_sdev); ++ } + } + } + diff --git a/queue-6.6/scsi-target-core-add-missing-file_-start-end-_write.patch b/queue-6.6/scsi-target-core-add-missing-file_-start-end-_write.patch new file mode 100644 index 00000000000..7b3c3083254 --- /dev/null +++ b/queue-6.6/scsi-target-core-add-missing-file_-start-end-_write.patch @@ -0,0 +1,60 @@ +From 0db1d53937fafa8bb96e077375691e16902f4899 Mon Sep 17 00:00:00 2001 +From: Amir Goldstein +Date: Thu, 23 Nov 2023 11:20:00 +0200 +Subject: scsi: target: core: add missing file_{start,end}_write() + +From: Amir Goldstein + +commit 0db1d53937fafa8bb96e077375691e16902f4899 upstream. + +The callers of vfs_iter_write() are required to hold file_start_write(). +file_start_write() is a no-op for the S_ISBLK() case, but it is really +needed when the backing file is a regular file. + +We are going to move file_{start,end}_write() into vfs_iter_write(), but +we need to fix this first, so that the fix could be backported to stable +kernels. + +Suggested-by: Christoph Hellwig +Link: https://lore.kernel.org/r/ZV8ETIpM+wZa33B5@infradead.org/ +Cc: +Signed-off-by: Amir Goldstein +Link: https://lore.kernel.org/r/20231123092000.2665902-1-amir73il@gmail.com +Acked-by: Martin K. 
Petersen +Reviewed-by: Christoph Hellwig +Reviewed-by: Jens Axboe +Signed-off-by: Christian Brauner +Signed-off-by: Greg Kroah-Hartman +--- + drivers/target/target_core_file.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -332,11 +332,13 @@ static int fd_do_rw(struct se_cmd *cmd, + } + + iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); +- if (is_write) ++ if (is_write) { ++ file_start_write(fd); + ret = vfs_iter_write(fd, &iter, &pos, 0); +- else ++ file_end_write(fd); ++ } else { + ret = vfs_iter_read(fd, &iter, &pos, 0); +- ++ } + if (is_write) { + if (ret < 0 || ret != data_length) { + pr_err("%s() write returned %d\n", __func__, ret); +@@ -467,7 +469,9 @@ fd_execute_write_same(struct se_cmd *cmd + } + + iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len); ++ file_start_write(fd_dev->fd_file); + ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); ++ file_end_write(fd_dev->fd_file); + + kfree(bvec); + if (ret < 0 || ret != len) { diff --git a/queue-6.6/scsi-ufs-core-simplify-power-management-during-async-scan.patch b/queue-6.6/scsi-ufs-core-simplify-power-management-during-async-scan.patch new file mode 100644 index 00000000000..9311446bb1e --- /dev/null +++ b/queue-6.6/scsi-ufs-core-simplify-power-management-during-async-scan.patch @@ -0,0 +1,54 @@ +From daf7795406bf307997366f694888bd317ae5b5fa Mon Sep 17 00:00:00 2001 +From: Bart Van Assche +Date: Mon, 18 Dec 2023 14:52:14 -0800 +Subject: scsi: ufs: core: Simplify power management during async scan + +From: Bart Van Assche + +commit daf7795406bf307997366f694888bd317ae5b5fa upstream. + +ufshcd_init() calls pm_runtime_get_sync() before it calls +async_schedule(). ufshcd_async_scan() calls pm_runtime_put_sync() directly +or indirectly from ufshcd_add_lus(). Simplify ufshcd_async_scan() by always +calling pm_runtime_put_sync() from ufshcd_async_scan(). + +Cc: +Signed-off-by: Bart Van Assche +Link: https://lore.kernel.org/r/20231218225229.2542156-2-bvanassche@acm.org +Reviewed-by: Can Guo +Reviewed-by: Manivannan Sadhasivam +Signed-off-by: Martin K. Petersen +Signed-off-by: Greg Kroah-Hartman +--- + drivers/ufs/core/ufshcd.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -8540,7 +8540,6 @@ static int ufshcd_add_lus(struct ufs_hba + + ufs_bsg_probe(hba); + scsi_scan_host(hba->host); +- pm_runtime_put_sync(hba->dev); + + out: + return ret; +@@ -8808,15 +8807,15 @@ static void ufshcd_async_scan(void *data + + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); ++ + out: ++ pm_runtime_put_sync(hba->dev); + /* + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. 
+ */ +- if (ret) { +- pm_runtime_put_sync(hba->dev); ++ if (ret) + ufshcd_hba_exit(hba); +- } + } + + static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) diff --git a/queue-6.6/series b/queue-6.6/series index 1a37376bd5c..313f15a375b 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -385,3 +385,26 @@ ksmbd-fix-uaf-issue-in-ksmbd_tcp_new_connection.patch ksmbd-only-v2-leases-handle-the-directory.patch ceph-select-fs_encryption_algs-if-fs_encryption.patch loongarch-fix-and-simplify-fcsr-initialization-on-execve.patch +io_uring-don-t-check-iopoll-if-request-completes.patch +io_uring-rw-ensure-io-bytes_done-is-always-initialized.patch +io_uring-ensure-local-task_work-is-run-on-wait-timeout.patch +fbdev-acornfb-fix-name-of-fb_ops-initializer-macro.patch +fbdev-flush-deferred-work-in-fb_deferred_io_fsync.patch +fbdev-flush-deferred-io-before-closing.patch +scsi-ufs-core-simplify-power-management-during-async-scan.patch +scsi-target-core-add-missing-file_-start-end-_write.patch +scsi-mpi3mr-refresh-sdev-queue-depth-after-controller-reset.patch +scsi-mpi3mr-clean-up-block-devices-post-controller-reset.patch +scsi-mpi3mr-block-pel-enable-command-on-controller-reset-and-unrecoverable-state.patch +md-bypass-block-throttle-for-superblock-update.patch +drm-amd-enable-pcie-pme-from-d3.patch +block-add-check-that-partition-length-needs-to-be-aligned-with-block-size.patch +block-remove-special-casing-of-compound-pages.patch +block-fix-iterating-over-an-empty-bio-with-bio_for_each_folio_all.patch +netfilter-nf_tables-check-if-catch-all-set-element-is-active-in-next-generation.patch +pwm-jz4740-don-t-use-dev_err_probe-in-.request.patch +pwm-fix-out-of-bounds-access-in-of_pwm_single_xlate.patch +md-raid1-use-blk_opf_t-for-read-and-write-operations.patch +rootfs-fix-support-for-rootfstype-when-root-is-given.patch +bluetooth-fix-atomicity-violation-in-min-max-_key_size_set.patch +bpf-fix-re-attachment-branch-in-bpf_tracing_prog_attach.patch -- 2.47.3