6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 7 May 2023 13:13:39 +0000 (15:13 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 7 May 2023 13:13:39 +0000 (15:13 +0200)
added patches:
btrfs-scrub-reject-unsupported-scrub-flags.patch
dm-clone-call-kmem_cache_destroy-in-dm_clone_init-error-path.patch
dm-don-t-lock-fs-when-the-map-is-null-in-process-of-resume.patch
dm-flakey-fix-a-crash-with-invalid-table-line.patch
dm-integrity-call-kmem_cache_destroy-in-dm_integrity_init-error-path.patch
dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch
dm-verity-fix-error-handling-for-check_at_most_once-on-fec.patch
ia64-fix-an-addr-to-taddr-in-huge_pte_offset.patch
mm-mempolicy-correctly-update-prev-when-policy-is-equal-on-mbind.patch
s390-dasd-fix-hanging-blockdevice-after-request-requeue.patch
scripts-gdb-fix-lx-timerlist-for-python3.patch
vhost_vdpa-fix-unmap-process-in-no-batch-mode.patch

13 files changed:
queue-6.1/btrfs-scrub-reject-unsupported-scrub-flags.patch [new file with mode: 0644]
queue-6.1/dm-clone-call-kmem_cache_destroy-in-dm_clone_init-error-path.patch [new file with mode: 0644]
queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-in-process-of-resume.patch [new file with mode: 0644]
queue-6.1/dm-flakey-fix-a-crash-with-invalid-table-line.patch [new file with mode: 0644]
queue-6.1/dm-integrity-call-kmem_cache_destroy-in-dm_integrity_init-error-path.patch [new file with mode: 0644]
queue-6.1/dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch [new file with mode: 0644]
queue-6.1/dm-verity-fix-error-handling-for-check_at_most_once-on-fec.patch [new file with mode: 0644]
queue-6.1/ia64-fix-an-addr-to-taddr-in-huge_pte_offset.patch [new file with mode: 0644]
queue-6.1/mm-mempolicy-correctly-update-prev-when-policy-is-equal-on-mbind.patch [new file with mode: 0644]
queue-6.1/s390-dasd-fix-hanging-blockdevice-after-request-requeue.patch [new file with mode: 0644]
queue-6.1/scripts-gdb-fix-lx-timerlist-for-python3.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/vhost_vdpa-fix-unmap-process-in-no-batch-mode.patch [new file with mode: 0644]

diff --git a/queue-6.1/btrfs-scrub-reject-unsupported-scrub-flags.patch b/queue-6.1/btrfs-scrub-reject-unsupported-scrub-flags.patch
new file mode 100644 (file)
index 0000000..a39baf2
--- /dev/null
@@ -0,0 +1,57 @@
+From 604e6681e114d05a2e384c4d1e8ef81918037ef5 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 6 Apr 2023 13:00:34 +0800
+Subject: btrfs: scrub: reject unsupported scrub flags
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit 604e6681e114d05a2e384c4d1e8ef81918037ef5 upstream.
+
+Since the introduction of the scrub interface, the only flag that we
+support is BTRFS_SCRUB_READONLY.  Thus there are no sanity checks; if
+some undefined flags are passed in, we just ignore them.
+
+This is problematic if we want to introduce new scrub flags, as we have
+no way to determine if such flags are supported.
+
+Address the problem by introducing a check for the flags, and if
+unsupported flags are set, return -EOPNOTSUPP to inform user space.
+
+This check should be backported for all supported kernels before any new
+scrub flags are introduced.
+
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ioctl.c           |    5 +++++
+ include/uapi/linux/btrfs.h |    1 +
+ 2 files changed, 6 insertions(+)
+
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4050,6 +4050,11 @@ static long btrfs_ioctl_scrub(struct fil
+       if (IS_ERR(sa))
+               return PTR_ERR(sa);
++      if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
++              ret = -EOPNOTSUPP;
++              goto out;
++      }
++
+       if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
+               ret = mnt_want_write_file(file);
+               if (ret)
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -181,6 +181,7 @@ struct btrfs_scrub_progress {
+ };
+ #define BTRFS_SCRUB_READONLY  1
++#define BTRFS_SCRUB_SUPPORTED_FLAGS   (BTRFS_SCRUB_READONLY)
+ struct btrfs_ioctl_scrub_args {
+       __u64 devid;                            /* in */
+       __u64 start;                            /* in */
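
For illustration only (this snippet is not part of the queued patch), a
minimal user-space sketch of how the new check surfaces to callers; the
mount point and the chosen undefined flag bit are assumptions, and the
ioctl needs CAP_SYS_ADMIN:

/*
 * Opens a btrfs mount and issues BTRFS_IOC_SCRUB with a flag bit that is
 * not in BTRFS_SCRUB_SUPPORTED_FLAGS.  On kernels with this patch the
 * ioctl fails with EOPNOTSUPP instead of silently ignoring the flag.
 * Run as root; error handling is abbreviated.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
        struct btrfs_ioctl_scrub_args sa;
        int fd = open("/mnt/btrfs", O_RDONLY);  /* hypothetical mount point */

        if (fd < 0)
                return 1;

        memset(&sa, 0, sizeof(sa));
        sa.devid = 1;
        sa.end = (__u64)-1;
        sa.flags = 1ULL << 63;                  /* undefined scrub flag bit */

        if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0 && errno == EOPNOTSUPP)
                printf("unsupported scrub flags rejected as expected\n");
        return 0;
}
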
diff --git a/queue-6.1/dm-clone-call-kmem_cache_destroy-in-dm_clone_init-error-path.patch b/queue-6.1/dm-clone-call-kmem_cache_destroy-in-dm_clone_init-error-path.patch
new file mode 100644 (file)
index 0000000..86d146f
--- /dev/null
@@ -0,0 +1,28 @@
+From 6827af4a9a9f5bb664c42abf7c11af4978d72201 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@kernel.org>
+Date: Tue, 4 Apr 2023 11:59:00 -0400
+Subject: dm clone: call kmem_cache_destroy() in dm_clone_init() error path
+
+From: Mike Snitzer <snitzer@kernel.org>
+
+commit 6827af4a9a9f5bb664c42abf7c11af4978d72201 upstream.
+
+Otherwise the _hydration_cache will leak if dm_register_target() fails.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-clone-target.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -2205,6 +2205,7 @@ static int __init dm_clone_init(void)
+       r = dm_register_target(&clone_target);
+       if (r < 0) {
+               DMERR("Failed to register clone target");
++              kmem_cache_destroy(_hydration_cache);
+               return r;
+       }
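
The dm-integrity patch further down applies the same rule.  As a schematic
sketch (illustrative identifiers, not buildable driver code), the module
init error path has to unwind every allocation made before a failing
dm_register_target():

/*
 * Schematic only: "example_cache" and "example_target" are made-up names.
 * The point is that the kmem_cache created before registration must be
 * destroyed again when registration fails, otherwise it leaks.
 */
static struct kmem_cache *example_cache;

static int __init example_target_init(void)
{
        int r;

        example_cache = kmem_cache_create("example_cache", 64, 0, 0, NULL);
        if (!example_cache)
                return -ENOMEM;

        r = dm_register_target(&example_target);       /* may fail */
        if (r < 0) {
                DMERR("Failed to register example target");
                kmem_cache_destroy(example_cache);      /* undo the allocation */
                return r;
        }

        return 0;
}

static void __exit example_target_exit(void)
{
        dm_unregister_target(&example_target);
        kmem_cache_destroy(example_cache);
}
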
diff --git a/queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-in-process-of-resume.patch b/queue-6.1/dm-don-t-lock-fs-when-the-map-is-null-in-process-of-resume.patch
new file mode 100644 (file)
index 0000000..bc344c0
--- /dev/null
@@ -0,0 +1,81 @@
+From 38d11da522aacaa05898c734a1cec86f1e611129 Mon Sep 17 00:00:00 2001
+From: Li Lingfeng <lilingfeng3@huawei.com>
+Date: Tue, 18 Apr 2023 16:38:04 +0800
+Subject: dm: don't lock fs when the map is NULL in process of resume
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+commit 38d11da522aacaa05898c734a1cec86f1e611129 upstream.
+
+Commit fa247089de99 ("dm: requeue IO if mapping table not yet available")
+added detection of whether the mapping table is available in the IO
+submission process. If the mapping table is unavailable, it returns
+BLK_STS_RESOURCE and requeues the IO.
+This can lead to the following deadlock problem:
+
+dm create                                      mount
+ioctl(DM_DEV_CREATE_CMD)
+ioctl(DM_TABLE_LOAD_CMD)
+                               do_mount
+                                vfs_get_tree
+                                 ext4_get_tree
+                                  get_tree_bdev
+                                   sget_fc
+                                    alloc_super
+                                     // got &s->s_umount
+                                     down_write_nested(&s->s_umount, ...);
+                                   ext4_fill_super
+                                    ext4_load_super
+                                     ext4_read_bh
+                                      submit_bio
+                                      // submit and wait io end
+ioctl(DM_DEV_SUSPEND_CMD)
+dev_suspend
+ do_resume
+  dm_suspend
+   __dm_suspend
+    lock_fs
+     freeze_bdev
+      get_active_super
+       grab_super
+        // wait for &s->s_umount
+        down_write(&s->s_umount);
+  dm_swap_table
+   __bind
+    // set md->map(can't get here)
+
+IO will be continuously requeued while the lock is held, since the
+mapping table is NULL. At the same time, the mapping table won't be set
+because the lock is not available.
+Like request-based DM, bio-based DM has the same problem.
+
+It's not proper to just abort IO if the mapping table is not available.
+So clear DM_SUSPEND_LOCKFS_FLAG when the mapping table is NULL; this
+allows the DM table to be loaded and the IO submitted upon resume.
+
+Fixes: fa247089de99 ("dm: requeue IO if mapping table not yet available")
+Cc: stable@vger.kernel.org
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-ioctl.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1151,10 +1151,13 @@ static int do_resume(struct dm_ioctl *pa
+       /* Do we need to load a new map ? */
+       if (new_map) {
+               sector_t old_size, new_size;
++              int srcu_idx;
+               /* Suspend if it isn't already suspended */
+-              if (param->flags & DM_SKIP_LOCKFS_FLAG)
++              old_map = dm_get_live_table(md, &srcu_idx);
++              if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
+                       suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++              dm_put_live_table(md, srcu_idx);
+               if (param->flags & DM_NOFLUSH_FLAG)
+                       suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+               if (!dm_suspended_md(md))
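
A side note on the helpers the fix relies on (a schematic fragment, not the
complete do_resume() body): dm_get_live_table() returns NULL until the
first table has been bound, and the lookup is SRCU-protected, so it must
always be paired with dm_put_live_table():

/*
 * Schematic fragment.  If no table is live yet there is nothing to
 * freeze, so the lockfs step is skipped; otherwise lock_fs would block on
 * s_umount held by the mount path whose IO is being requeued, as in the
 * trace above.
 */
int srcu_idx;
struct dm_table *live_map;

live_map = dm_get_live_table(md, &srcu_idx);    /* NULL before the first bind */
if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !live_map)
        suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;       /* do not freeze the fs */
dm_put_live_table(md, srcu_idx);                /* always pair with the get */
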
diff --git a/queue-6.1/dm-flakey-fix-a-crash-with-invalid-table-line.patch b/queue-6.1/dm-flakey-fix-a-crash-with-invalid-table-line.patch
new file mode 100644 (file)
index 0000000..dc28c69
--- /dev/null
@@ -0,0 +1,37 @@
+From 98dba02d9a93eec11bffbb93c7c51624290702d2 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 18 Apr 2023 15:57:47 -0400
+Subject: dm flakey: fix a crash with invalid table line
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 98dba02d9a93eec11bffbb93c7c51624290702d2 upstream.
+
+This command will crash with a NULL pointer dereference:
+ dmsetup create flakey --table \
+  "0 `blockdev --getsize /dev/ram0` flakey /dev/ram0 0 0 1 2 corrupt_bio_byte 512"
+
+Fix the crash by checking if arg_name is non-NULL before comparing it.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-flakey.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_
+                        * Direction r or w?
+                        */
+                       arg_name = dm_shift_arg(as);
+-                      if (!strcasecmp(arg_name, "w"))
++                      if (arg_name && !strcasecmp(arg_name, "w"))
+                               fc->corrupt_bio_rw = WRITE;
+-                      else if (!strcasecmp(arg_name, "r"))
++                      else if (arg_name && !strcasecmp(arg_name, "r"))
+                               fc->corrupt_bio_rw = READ;
+                       else {
+                               ti->error = "Invalid corrupt bio direction (r or w)";
diff --git a/queue-6.1/dm-integrity-call-kmem_cache_destroy-in-dm_integrity_init-error-path.patch b/queue-6.1/dm-integrity-call-kmem_cache_destroy-in-dm_integrity_init-error-path.patch
new file mode 100644 (file)
index 0000000..d0ffe5b
--- /dev/null
@@ -0,0 +1,37 @@
+From 6b79a428c02769f2a11f8ae76bf866226d134887 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@kernel.org>
+Date: Tue, 4 Apr 2023 13:34:28 -0400
+Subject: dm integrity: call kmem_cache_destroy() in dm_integrity_init() error path
+
+From: Mike Snitzer <snitzer@kernel.org>
+
+commit 6b79a428c02769f2a11f8ae76bf866226d134887 upstream.
+
+Otherwise the journal_io_cache will leak if dm_register_target() fails.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-integrity.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4646,11 +4646,13 @@ static int __init dm_integrity_init(void
+       }
+       r = dm_register_target(&integrity_target);
+-
+-      if (r < 0)
++      if (r < 0) {
+               DMERR("register failed %d", r);
++              kmem_cache_destroy(journal_io_cache);
++              return r;
++      }
+-      return r;
++      return 0;
+ }
+ static void __exit dm_integrity_exit(void)
diff --git a/queue-6.1/dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch b/queue-6.1/dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch
new file mode 100644 (file)
index 0000000..542d353
--- /dev/null
@@ -0,0 +1,58 @@
+From 3d32aaa7e66d5c1479a3c31d6c2c5d45dd0d3b89 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@kernel.org>
+Date: Mon, 17 Apr 2023 11:59:56 -0400
+Subject: dm ioctl: fix nested locking in table_clear() to remove deadlock concern
+
+From: Mike Snitzer <snitzer@kernel.org>
+
+commit 3d32aaa7e66d5c1479a3c31d6c2c5d45dd0d3b89 upstream.
+
+syzkaller found the following problematic rwsem locking (with write
+lock already held):
+
+ down_read+0x9d/0x450 kernel/locking/rwsem.c:1509
+ dm_get_inactive_table+0x2b/0xc0 drivers/md/dm-ioctl.c:773
+ __dev_status+0x4fd/0x7c0 drivers/md/dm-ioctl.c:844
+ table_clear+0x197/0x280 drivers/md/dm-ioctl.c:1537
+
+In table_clear, it first acquires a write lock
+https://elixir.bootlin.com/linux/v6.2/source/drivers/md/dm-ioctl.c#L1520
+down_write(&_hash_lock);
+
+Then before the lock is released at L1539, there is a path shown above:
+table_clear -> __dev_status -> dm_get_inactive_table ->  down_read
+https://elixir.bootlin.com/linux/v6.2/source/drivers/md/dm-ioctl.c#L773
+down_read(&_hash_lock);
+
+It tries to acquire the same lock again, this time for read, resulting
+in the deadlock.
+
+Fix this by moving table_clear()'s __dev_status() call to after its
+up_write(&_hash_lock);
+
+Cc: stable@vger.kernel.org
+Reported-by: Zheng Zhang <zheng.zhang@email.ucr.edu>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-ioctl.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1539,11 +1539,12 @@ static int table_clear(struct file *filp
+               has_new_map = true;
+       }
+-      param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+-
+-      __dev_status(hc->md, param);
+       md = hc->md;
+       up_write(&_hash_lock);
++
++      param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++      __dev_status(md, param);
++
+       if (old_map) {
+               dm_sync_table(md);
+               dm_table_destroy(old_map);
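
To make the failure mode concrete, a minimal illustration (generic
kernel-style code, not taken from dm-ioctl) of why read-acquiring an rwsem
that the same task already holds for write deadlocks:

/*
 * Minimal illustration only.  rwsems are not recursive: down_read()
 * blocks while any writer holds the lock, even when that writer is the
 * current task.  This is the ordering table_clear() had before the fix.
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_lock);

static void broken_ordering(void)
{
        down_write(&example_lock);
        /* ... status collection used to run here and took ... */
        down_read(&example_lock);       /* never returns: the writer is us */
        up_read(&example_lock);
        up_write(&example_lock);
        /* the fix: drop the write lock first, then read the status */
}
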
diff --git a/queue-6.1/dm-verity-fix-error-handling-for-check_at_most_once-on-fec.patch b/queue-6.1/dm-verity-fix-error-handling-for-check_at_most_once-on-fec.patch
new file mode 100644 (file)
index 0000000..d163102
--- /dev/null
@@ -0,0 +1,44 @@
+From e8c5d45f82ce0c238a4817739892fe8897a3dcc3 Mon Sep 17 00:00:00 2001
+From: Yeongjin Gil <youngjin.gil@samsung.com>
+Date: Mon, 20 Mar 2023 15:59:32 +0900
+Subject: dm verity: fix error handling for check_at_most_once on FEC
+
+From: Yeongjin Gil <youngjin.gil@samsung.com>
+
+commit e8c5d45f82ce0c238a4817739892fe8897a3dcc3 upstream.
+
+In verity_end_io(), if bi_status is not BLK_STS_OK, it can be returned
+directly. But if FEC is configured, it is desirable to correct the data
+page through verity_verify_io(). The return value is then converted to
+blk_status and passed to verity_finish_io().
+
+However, when a bit is set in v->validated_blocks, verity_verify_io()
+skips verification regardless of an I/O error for the corresponding
+bio. In this case, the I/O error cannot be returned properly, and as
+a result, corrupted data could be read for the corresponding
+block.
+
+To fix this problem, when an I/O error occurs, do not skip verification
+even if the related bit is set in v->validated_blocks.
+
+Fixes: 843f38d382b1 ("dm verity: add 'check_at_most_once' option to only validate hashes once")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Yeongjin Gil <youngjin.gil@samsung.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-target.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -523,7 +523,7 @@ static int verity_verify_io(struct dm_ve
+               sector_t cur_block = io->block + b;
+               struct ahash_request *req = verity_io_hash_req(v, io);
+-              if (v->validated_blocks &&
++              if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
+                   likely(test_bit(cur_block, v->validated_blocks))) {
+                       verity_bv_skip_block(v, io, iter);
+                       continue;
diff --git a/queue-6.1/ia64-fix-an-addr-to-taddr-in-huge_pte_offset.patch b/queue-6.1/ia64-fix-an-addr-to-taddr-in-huge_pte_offset.patch
new file mode 100644 (file)
index 0000000..bdde4fe
--- /dev/null
@@ -0,0 +1,36 @@
+From 3647ebcfbfca384840231fe13fae665453238a61 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Sun, 16 Apr 2023 22:17:05 -0700
+Subject: ia64: fix an addr to taddr in huge_pte_offset()
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 3647ebcfbfca384840231fe13fae665453238a61 upstream.
+
+I know nothing of ia64 htlbpage_to_page(), but guess that the p4d
+line should be using taddr rather than addr, like everywhere else.
+
+Link: https://lkml.kernel.org/r/732eae88-3beb-246-2c72-281de786740@google.com
+Fixes: c03ab9e32a2c ("ia64: add support for folded p4d page tables")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/ia64/mm/hugetlbpage.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, u
+       pgd = pgd_offset(mm, taddr);
+       if (pgd_present(*pgd)) {
+-              p4d = p4d_offset(pgd, addr);
++              p4d = p4d_offset(pgd, taddr);
+               if (p4d_present(*p4d)) {
+                       pud = pud_offset(p4d, taddr);
+                       if (pud_present(*pud)) {
diff --git a/queue-6.1/mm-mempolicy-correctly-update-prev-when-policy-is-equal-on-mbind.patch b/queue-6.1/mm-mempolicy-correctly-update-prev-when-policy-is-equal-on-mbind.patch
new file mode 100644 (file)
index 0000000..ddf9fd3
--- /dev/null
@@ -0,0 +1,71 @@
+From 00ca0f2e86bf40b016a646e6323a8941a09cf106 Mon Sep 17 00:00:00 2001
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+Date: Sun, 30 Apr 2023 16:07:07 +0100
+Subject: mm/mempolicy: correctly update prev when policy is equal on mbind
+
+From: Lorenzo Stoakes <lstoakes@gmail.com>
+
+commit 00ca0f2e86bf40b016a646e6323a8941a09cf106 upstream.
+
+The refactoring in commit f4e9e0e69468 ("mm/mempolicy: fix use-after-free
+of VMA iterator") introduces a subtle bug which arises when attempting to
+apply a new NUMA policy across a range of VMAs in mbind_range().
+
+The refactoring passes a **prev pointer to keep track of the previous VMA
+in order to reduce duplication, and in all but one case it keeps this
+correctly updated.
+
+The bug arises when a VMA within the specified range has an equivalent
+policy as determined by mpol_equal() - which unlike other cases, does not
+update prev.
+
+This can result in a situation where, later in the iteration, a VMA is
+found whose policy does need to change.  At this point, vma_merge() is
+invoked with prev pointing to a VMA which is before the previous VMA.
+
+Since vma_merge() discovers the curr VMA by looking for the one
+immediately after prev, it will now be in a situation where this VMA is
+incorrect and the merge will not proceed correctly.
+
+This is checked in the VM_WARN_ON() invariant case with end >
+curr->vm_end, which, if a merge is possible, results in a warning (if
+CONFIG_DEBUG_VM is specified).
+
+I note that vma_merge() performs these invariant checks only after
+merge_prev/merge_next are checked, which is debatable as it hides this
+issue if no merge is possible even though a buggy situation has arisen.
+
+The solution is simply to update the prev pointer even when policies are
+equal.
+
+This caused a bug to arise in the 6.2.y stable tree, and this patch
+resolves this bug.
+
+Link: https://lkml.kernel.org/r/83f1d612acb519d777bebf7f3359317c4e7f4265.1682866629.git.lstoakes@gmail.com
+Fixes: f4e9e0e69468 ("mm/mempolicy: fix use-after-free of VMA iterator")
+Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+  Link: https://lore.kernel.org/oe-lkp/202304292203.44ddeff6-oliver.sang@intel.com
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -802,8 +802,10 @@ static int mbind_range(struct vma_iterat
+               vmstart = vma->vm_start;
+       }
+-      if (mpol_equal(vma_policy(vma), new_pol))
++      if (mpol_equal(vma_policy(vma), new_pol)) {
++              *prev = vma;
+               return 0;
++      }
+       pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+       merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
diff --git a/queue-6.1/s390-dasd-fix-hanging-blockdevice-after-request-requeue.patch b/queue-6.1/s390-dasd-fix-hanging-blockdevice-after-request-requeue.patch
new file mode 100644 (file)
index 0000000..d00ca52
--- /dev/null
@@ -0,0 +1,39 @@
+From d8898ee50edecacdf0141f26fd90acf43d7e9cd7 Mon Sep 17 00:00:00 2001
+From: Stefan Haberland <sth@linux.ibm.com>
+Date: Wed, 5 Apr 2023 16:20:17 +0200
+Subject: s390/dasd: fix hanging blockdevice after request requeue
+
+From: Stefan Haberland <sth@linux.ibm.com>
+
+commit d8898ee50edecacdf0141f26fd90acf43d7e9cd7 upstream.
+
+The DASD driver does not kick the requeue list when requeuing IO requests
+to the block layer. This might lead to a hanging block device when there
+is no other trigger for this.
+
+Fix this by automatically kicking the requeue list when requeuing DASD
+requests to the block layer.
+
+Fixes: e443343e509a ("s390/dasd: blk-mq conversion")
+CC: stable@vger.kernel.org # 4.14+
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Reviewed-by: Jan Hoeppner <hoeppner@linux.ibm.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Link: https://lore.kernel.org/r/20230405142017.2446986-8-sth@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/block/dasd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2941,7 +2941,7 @@ static int _dasd_requeue_request(struct
+               return 0;
+       spin_lock_irq(&cqr->dq->lock);
+       req = (struct request *) cqr->callback_data;
+-      blk_mq_requeue_request(req, false);
++      blk_mq_requeue_request(req, true);
+       spin_unlock_irq(&cqr->dq->lock);
+       return 0;
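
For context on the one-line change (a sketch of the block-layer API
semantics, not DASD driver code): the second argument of
blk_mq_requeue_request() controls whether the requeue list is kicked
immediately; with false, the caller is expected to kick it later itself:

/*
 * Sketch of the API semantics (illustrative helpers, not driver code).
 * With the second argument true, blk-mq kicks the requeue list right
 * away and the request gets re-dispatched; with false, nothing happens
 * until someone calls blk_mq_kick_requeue_list() -- which DASD never
 * did, so requeued requests could sit on the list indefinitely.
 */
#include <linux/blk-mq.h>

static void requeue_and_kick(struct request *req)
{
        blk_mq_requeue_request(req, true);      /* requeue and kick immediately */
}

static void requeue_without_kick(struct request *req)
{
        blk_mq_requeue_request(req, false);
        blk_mq_kick_requeue_list(req->q);       /* must happen eventually */
}
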
diff --git a/queue-6.1/scripts-gdb-fix-lx-timerlist-for-python3.patch b/queue-6.1/scripts-gdb-fix-lx-timerlist-for-python3.patch
new file mode 100644 (file)
index 0000000..483d967
--- /dev/null
@@ -0,0 +1,67 @@
+From 7362042f3556528e9e9b1eb5ce8d7a3a6331476b Mon Sep 17 00:00:00 2001
+From: Peng Liu <liupeng17@lenovo.com>
+Date: Tue, 21 Mar 2023 14:19:29 +0800
+Subject: scripts/gdb: fix lx-timerlist for Python3
+
+From: Peng Liu <liupeng17@lenovo.com>
+
+commit 7362042f3556528e9e9b1eb5ce8d7a3a6331476b upstream.
+
+The incompatibilities below between Python2 and Python3 made lx-timerlist
+fail to run under Python3.
+
+o xrange() is replaced by range() in Python3
+o bytes and str are different types in Python3
+o the return value of Inferior.read_memory() is a memoryview object in
+  Python3
+
+akpm: cc stable so that older kernels are properly debuggable under newer
+Python.
+
+Link: https://lkml.kernel.org/r/TYCP286MB2146EE1180A4D5176CBA8AB2C6819@TYCP286MB2146.JPNP286.PROD.OUTLOOK.COM
+Signed-off-by: Peng Liu <liupeng17@lenovo.com>
+Reviewed-by: Jan Kiszka <jan.kiszka@siemens.com>
+Cc: Florian Fainelli <f.fainelli@gmail.com>
+Cc: Kieran Bingham <kbingham@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/gdb/linux/timerlist.py |    4 +++-
+ scripts/gdb/linux/utils.py     |    5 ++++-
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/scripts/gdb/linux/timerlist.py
++++ b/scripts/gdb/linux/timerlist.py
+@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_cl
+     ts = cpus.per_cpu(tick_sched_ptr, cpu)
+     text = "cpu: {}\n".format(cpu)
+-    for i in xrange(max_clock_bases):
++    for i in range(max_clock_bases):
+         text += " clock {}:\n".format(i)
+         text += print_base(cpu_base['clock_base'][i])
+@@ -158,6 +158,8 @@ def pr_cpumask(mask):
+     num_bytes = (nr_cpu_ids + 7) / 8
+     buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
+     buf = binascii.b2a_hex(buf)
++    if type(buf) is not str:
++        buf=buf.decode()
+     chunks = []
+     i = num_bytes
+--- a/scripts/gdb/linux/utils.py
++++ b/scripts/gdb/linux/utils.py
+@@ -88,7 +88,10 @@ def get_target_endianness():
+ def read_memoryview(inf, start, length):
+-    return memoryview(inf.read_memory(start, length))
++    m = inf.read_memory(start, length)
++    if type(m) is memoryview:
++        return m
++    return memoryview(m)
+ def read_u16(buffer, offset):
diff --git a/queue-6.1/series b/queue-6.1/series
index 8b291a41e3ab86e0d8fe4a098e814c97b19401da..2f4c03950e76df95af0abaf187ab4c0ea383a3f7 100644 (file)
--- a/queue-6.1/series
@@ -591,3 +591,15 @@ wifi-rtl8xxxu-rtl8192eu-always-needs-full-init.patch
 wifi-rtw89-fix-potential-race-condition-between-napi_init-and-napi_enable.patch
 clk-microchip-fix-potential-uaf-in-auxdev-release-callback.patch
 clk-rockchip-rk3399-allow-clk_cifout-to-force-clk_cifout_src-to-reparent.patch
+scripts-gdb-fix-lx-timerlist-for-python3.patch
+btrfs-scrub-reject-unsupported-scrub-flags.patch
+s390-dasd-fix-hanging-blockdevice-after-request-requeue.patch
+ia64-fix-an-addr-to-taddr-in-huge_pte_offset.patch
+mm-mempolicy-correctly-update-prev-when-policy-is-equal-on-mbind.patch
+vhost_vdpa-fix-unmap-process-in-no-batch-mode.patch
+dm-verity-fix-error-handling-for-check_at_most_once-on-fec.patch
+dm-clone-call-kmem_cache_destroy-in-dm_clone_init-error-path.patch
+dm-integrity-call-kmem_cache_destroy-in-dm_integrity_init-error-path.patch
+dm-flakey-fix-a-crash-with-invalid-table-line.patch
+dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch
+dm-don-t-lock-fs-when-the-map-is-null-in-process-of-resume.patch
diff --git a/queue-6.1/vhost_vdpa-fix-unmap-process-in-no-batch-mode.patch b/queue-6.1/vhost_vdpa-fix-unmap-process-in-no-batch-mode.patch
new file mode 100644 (file)
index 0000000..41d785a
--- /dev/null
@@ -0,0 +1,61 @@
+From c82729e06644f4e087f5ff0f91b8fb15e03b8890 Mon Sep 17 00:00:00 2001
+From: Cindy Lu <lulu@redhat.com>
+Date: Thu, 20 Apr 2023 23:17:34 +0800
+Subject: vhost_vdpa: fix unmap process in no-batch mode
+
+From: Cindy Lu <lulu@redhat.com>
+
+commit c82729e06644f4e087f5ff0f91b8fb15e03b8890 upstream.
+
+While using the vdpa device with vIOMMU enabled in the guest VM, when
+the vdpa device is bound to vfio-pci and testpmd is run, the system
+will fail to unmap.
+The test process is:
+Load guest VM --> attach to virtio driver --> bind to vfio-pci driver
+So the mapping process is:
+1) batched mode maps to the normal MR
+2) batched mode unmaps the normal MR
+3) unmap all the memory
+4) map to the iommu MR
+
+This error happens in step 3). The iotlb was freed in step 2), so
+vhost_vdpa_process_iotlb_msg() will return a failure, which makes
+the unmap fail.
+
+To fix this, do not remove the AS while iotlb->nmaps is 0; it will be
+freed in the vhost_vdpa cleanup path.
+
+Cc: stable@vger.kernel.org
+Fixes: aaca8373c4b1 ("vhost-vdpa: support ASID based IOTLB API")
+Signed-off-by: Cindy Lu <lulu@redhat.com>
+Message-Id: <20230420151734.860168-1-lulu@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/vdpa.c |    8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -819,11 +819,7 @@ static void vhost_vdpa_unmap(struct vhos
+               if (!v->in_batch)
+                       ops->set_map(vdpa, asid, iotlb);
+       }
+-      /* If we are in the middle of batch processing, delay the free
+-       * of AS until BATCH_END.
+-       */
+-      if (!v->in_batch && !iotlb->nmaps)
+-              vhost_vdpa_remove_as(v, asid);
++
+ }
+ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+@@ -1080,8 +1076,6 @@ static int vhost_vdpa_process_iotlb_msg(
+               if (v->in_batch && ops->set_map)
+                       ops->set_map(vdpa, asid, iotlb);
+               v->in_batch = false;
+-              if (!iotlb->nmaps)
+-                      vhost_vdpa_remove_as(v, asid);
+               break;
+       default:
+               r = -EINVAL;