git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 29 Aug 2025 14:23:06 +0000 (16:23 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 29 Aug 2025 14:23:06 +0000 (16:23 +0200)
added patches:
acpi-ec-add-device-to-acpi_ec_no_wakeup-quirk-list.patch
nfs-fix-a-race-when-updating-an-existing-write.patch
nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch

queue-6.1/acpi-ec-add-device-to-acpi_ec_no_wakeup-quirk-list.patch [new file with mode: 0644]
queue-6.1/nfs-fix-a-race-when-updating-an-existing-write.patch [new file with mode: 0644]
queue-6.1/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch [new file with mode: 0644]

diff --git a/queue-6.1/acpi-ec-add-device-to-acpi_ec_no_wakeup-quirk-list.patch b/queue-6.1/acpi-ec-add-device-to-acpi_ec_no_wakeup-quirk-list.patch
new file mode 100644 (file)
index 0000000..1841909
--- /dev/null
@@ -0,0 +1,35 @@
+From 9cd51eefae3c871440b93c03716c5398f41bdf78 Mon Sep 17 00:00:00 2001
+From: Werner Sembach <wse@tuxedocomputers.com>
+Date: Thu, 8 May 2025 13:16:18 +0200
+Subject: ACPI: EC: Add device to acpi_ec_no_wakeup[] quirk list
+
+From: Werner Sembach <wse@tuxedocomputers.com>
+
+commit 9cd51eefae3c871440b93c03716c5398f41bdf78 upstream.
+
+Add the TUXEDO InfinityBook Pro AMD Gen9 to the acpi_ec_no_wakeup[]
+quirk list to prevent spurious wakeups.
+
+Signed-off-by: Werner Sembach <wse@tuxedocomputers.com>
+Link: https://patch.msgid.link/20250508111625.12149-1-wse@tuxedocomputers.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/ec.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2288,6 +2288,12 @@ static const struct dmi_system_id acpi_e
+                       DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+               }
+       },
++      {
++              // TUXEDO InfinityBook Pro AMD Gen9
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++              },
++      },
+       { },
+ };
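
The quirk above follows the kernel's standard DMI match pattern. A minimal sketch of how such a table is typically consulted at init time (the helper name ec_apply_quirks, the table name used here, and the exact call site are illustrative assumptions, not the verbatim ec.c code): dmi_check_system() walks the NULL-terminated table and returns the number of entries whose DMI strings match the running machine.

	#include <linux/dmi.h>

	static bool ec_no_wakeup;

	static const struct dmi_system_id ec_no_wakeup_quirks[] = {
		{
			/* TUXEDO InfinityBook Pro AMD Gen9, as added above */
			.matches = {
				DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
			},
		},
		{ }	/* terminator */
	};

	static void ec_apply_quirks(void)
	{
		/* On matching machines, leave the EC GPE disabled across
		 * suspend so it cannot trigger spurious wakeups. */
		if (dmi_check_system(ec_no_wakeup_quirks))
			ec_no_wakeup = true;
	}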
diff --git a/queue-6.1/nfs-fix-a-race-when-updating-an-existing-write.patch b/queue-6.1/nfs-fix-a-race-when-updating-an-existing-write.patch
new file mode 100644 (file)
index 0000000..ec94d60
--- /dev/null
@@ -0,0 +1,178 @@
+From 76d2e3890fb169168c73f2e4f8375c7cc24a765e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sat, 16 Aug 2025 07:25:20 -0700
+Subject: NFS: Fix a race when updating an existing write
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 76d2e3890fb169168c73f2e4f8375c7cc24a765e upstream.
+
+After nfs_lock_and_join_requests() tests for whether the request is
+still attached to the mapping, nothing prevents a call to
+nfs_inode_remove_request() from succeeding until we actually lock the
+page group.
+The reason is that whoever called nfs_inode_remove_request() doesn't
+necessarily have a lock on the page group head.
+
+So in order to avoid races, let's take the page group lock earlier in
+nfs_lock_and_join_requests(), and hold it across the removal of the
+request in nfs_inode_remove_request().
+
+Reported-by: Jeff Layton <jlayton@kernel.org>
+Tested-by: Joe Quanaim <jdq@meta.com>
+Tested-by: Andrew Steffen <aksteffen@meta.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Fixes: bd37d6fce184 ("NFSv4: Convert nfs_lock_and_join_requests() to use nfs_page_find_head_request()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pagelist.c        |    9 +++---
+ fs/nfs/write.c           |   66 ++++++++++++++++-------------------------------
+ include/linux/nfs_page.h |    1 +
+ 3 files changed, 29 insertions(+), 47 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -233,13 +233,14 @@ nfs_page_group_unlock(struct nfs_page *r
+       nfs_page_clear_headlock(req);
+ }
+-/*
+- * nfs_page_group_sync_on_bit_locked
++/**
++ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
++ * @req: request in page group
++ * @bit: PG_* bit that is used to sync page group
+  *
+  * must be called with page group lock held
+  */
+-static bool
+-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
++bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+ {
+       struct nfs_page *head = req->wb_head;
+       struct nfs_page *tmp;
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -154,20 +154,10 @@ nfs_page_set_inode_ref(struct nfs_page *
+       }
+ }
+-static int
+-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
++static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+ {
+-      int ret;
+-
+-      if (!test_bit(PG_REMOVE, &req->wb_flags))
+-              return 0;
+-      ret = nfs_page_group_lock(req);
+-      if (ret)
+-              return ret;
+       if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+               nfs_page_set_inode_ref(req, inode);
+-      nfs_page_group_unlock(req);
+-      return 0;
+ }
+ static struct nfs_page *
+@@ -239,36 +229,6 @@ static struct nfs_page *nfs_page_find_he
+       return req;
+ }
+-static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
+-{
+-      struct inode *inode = page_file_mapping(page)->host;
+-      struct nfs_page *req, *head;
+-      int ret;
+-
+-      for (;;) {
+-              req = nfs_page_find_head_request(page);
+-              if (!req)
+-                      return req;
+-              head = nfs_page_group_lock_head(req);
+-              if (head != req)
+-                      nfs_release_request(req);
+-              if (IS_ERR(head))
+-                      return head;
+-              ret = nfs_cancel_remove_inode(head, inode);
+-              if (ret < 0) {
+-                      nfs_unlock_and_release_request(head);
+-                      return ERR_PTR(ret);
+-              }
+-              /* Ensure that nobody removed the request before we locked it */
+-              if (head == nfs_page_private_request(page))
+-                      break;
+-              if (PageSwapCache(page))
+-                      break;
+-              nfs_unlock_and_release_request(head);
+-      }
+-      return head;
+-}
+-
+ /* Adjust the file length if we're writing beyond the end */
+ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+ {
+@@ -625,14 +585,32 @@ nfs_lock_and_join_requests(struct page *
+        * reference to the whole page group - the group will not be destroyed
+        * until the head reference is released.
+        */
+-      head = nfs_find_and_lock_page_request(page);
++retry:
++      head = nfs_page_find_head_request(page);
+       if (IS_ERR_OR_NULL(head))
+               return head;
++      while (!nfs_lock_request(head)) {
++              ret = nfs_wait_on_request(head);
++              if (ret < 0) {
++                      nfs_release_request(head);
++                      return ERR_PTR(ret);
++              }
++      }
++
+       ret = nfs_page_group_lock(head);
+       if (ret < 0)
+               goto out_unlock;
++      /* Ensure that nobody removed the request before we locked it */
++      if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
++              nfs_page_group_unlock(head);
++              nfs_unlock_and_release_request(head);
++              goto retry;
++      }
++
++      nfs_cancel_remove_inode(head, inode);
++
+       /* lock each request in the page group */
+       for (subreq = head->wb_this_page;
+            subreq != head;
+@@ -853,7 +831,8 @@ static void nfs_inode_remove_request(str
+       struct nfs_inode *nfsi = NFS_I(inode);
+       struct nfs_page *head;
+-      if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
++      nfs_page_group_lock(req);
++      if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
+               head = req->wb_head;
+               spin_lock(&mapping->private_lock);
+@@ -864,6 +843,7 @@ static void nfs_inode_remove_request(str
+               }
+               spin_unlock(&mapping->private_lock);
+       }
++      nfs_page_group_unlock(req);
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
+               nfs_release_request(req);
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -148,6 +148,7 @@ extern     void nfs_join_page_group(struct n
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
+ extern        int nfs_page_set_headlock(struct nfs_page *req);
+ extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
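
The shape of this bug, and of the fix, is visible without any NFS machinery. Below is a purely illustrative userspace analogue (pthreads; none of these names exist in the kernel code): testing a flag before taking the lock that guards it leaves a window in which another thread can act between the test and the acquisition, which is exactly why the patch moves the PG_REMOVE test under nfs_page_group_lock() via nfs_page_group_sync_on_bit_locked().

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool removed;	/* stands in for PG_REMOVE */

	/* Racy: 'removed' may be set by another thread between the
	 * unlocked test and the lock acquisition, so two threads can
	 * both decide they are the one doing the removal. */
	static void remove_request_racy(void)
	{
		if (!removed) {
			pthread_mutex_lock(&group_lock);
			removed = true;		/* detach the request */
			pthread_mutex_unlock(&group_lock);
		}
	}

	/* Fixed: lock first, then test, so the test and the update form
	 * one atomic step; this is the ordering the patch establishes. */
	static void remove_request_fixed(void)
	{
		pthread_mutex_lock(&group_lock);
		if (!removed)
			removed = true;		/* detach the request */
		pthread_mutex_unlock(&group_lock);
	}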
diff --git a/queue-6.1/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch b/queue-6.1/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
new file mode 100644 (file)
index 0000000..1b87215
--- /dev/null
@@ -0,0 +1,222 @@
+From 25edbcac6e32eab345e470d56ca9974a577b878b Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 1 Jul 2024 07:26:52 +0200
+Subject: nfs: fold nfs_page_group_lock_subrequests into nfs_lock_and_join_requests
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 25edbcac6e32eab345e470d56ca9974a577b878b upstream.
+
+Fold nfs_page_group_lock_subrequests into nfs_lock_and_join_requests to
+prepare for future changes to this code, and move the helpers to write.c
+as well.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pagelist.c        |   77 -----------------------------------------------
+ fs/nfs/write.c           |   74 +++++++++++++++++++++++++++++++++++++++++----
+ include/linux/nfs_page.h |    1 -
+ 3 files changed, 68 insertions(+), 84 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -168,83 +168,6 @@ nfs_page_group_lock_head(struct nfs_page
+ }
+ /*
+- * nfs_unroll_locks -  unlock all newly locked reqs and wait on @req
+- * @head: head request of page group, must be holding head lock
+- * @req: request that couldn't lock and needs to wait on the req bit lock
+- *
+- * This is a helper function for nfs_lock_and_join_requests
+- * returns 0 on success, < 0 on error.
+- */
+-static void
+-nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+-{
+-      struct nfs_page *tmp;
+-
+-      /* relinquish all the locks successfully grabbed this run */
+-      for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+-              if (!kref_read(&tmp->wb_kref))
+-                      continue;
+-              nfs_unlock_and_release_request(tmp);
+-      }
+-}
+-
+-/*
+- * nfs_page_group_lock_subreq -  try to lock a subrequest
+- * @head: head request of page group
+- * @subreq: request to lock
+- *
+- * This is a helper function for nfs_lock_and_join_requests which
+- * must be called with the head request and page group both locked.
+- * On error, it returns with the page group unlocked.
+- */
+-static int
+-nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
+-{
+-      int ret;
+-
+-      if (!kref_get_unless_zero(&subreq->wb_kref))
+-              return 0;
+-      while (!nfs_lock_request(subreq)) {
+-              nfs_page_group_unlock(head);
+-              ret = nfs_wait_on_request(subreq);
+-              if (!ret)
+-                      ret = nfs_page_group_lock(head);
+-              if (ret < 0) {
+-                      nfs_unroll_locks(head, subreq);
+-                      nfs_release_request(subreq);
+-                      return ret;
+-              }
+-      }
+-      return 0;
+-}
+-
+-/*
+- * nfs_page_group_lock_subrequests -  try to lock the subrequests
+- * @head: head request of page group
+- *
+- * This is a helper function for nfs_lock_and_join_requests which
+- * must be called with the head request locked.
+- */
+-int nfs_page_group_lock_subrequests(struct nfs_page *head)
+-{
+-      struct nfs_page *subreq;
+-      int ret;
+-
+-      ret = nfs_page_group_lock(head);
+-      if (ret < 0)
+-              return ret;
+-      /* lock each request in the page group */
+-      for (subreq = head->wb_this_page; subreq != head;
+-                      subreq = subreq->wb_this_page) {
+-              ret = nfs_page_group_lock_subreq(head, subreq);
+-              if (ret < 0)
+-                      return ret;
+-      }
+-      nfs_page_group_unlock(head);
+-      return 0;
+-}
+-
+-/*
+  * nfs_page_set_headlock - set the request PG_HEADLOCK
+  * @req: request that is to be locked
+  *
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -548,6 +548,57 @@ nfs_join_page_group(struct nfs_page *hea
+ }
+ /*
++ * nfs_unroll_locks -  unlock all newly locked reqs and wait on @req
++ * @head: head request of page group, must be holding head lock
++ * @req: request that couldn't lock and needs to wait on the req bit lock
++ *
++ * This is a helper function for nfs_lock_and_join_requests
++ * returns 0 on success, < 0 on error.
++ */
++static void
++nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
++{
++      struct nfs_page *tmp;
++
++      /* relinquish all the locks successfully grabbed this run */
++      for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
++              if (!kref_read(&tmp->wb_kref))
++                      continue;
++              nfs_unlock_and_release_request(tmp);
++      }
++}
++
++/*
++ * nfs_page_group_lock_subreq -  try to lock a subrequest
++ * @head: head request of page group
++ * @subreq: request to lock
++ *
++ * This is a helper function for nfs_lock_and_join_requests which
++ * must be called with the head request and page group both locked.
++ * On error, it returns with the page group unlocked.
++ */
++static int
++nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
++{
++      int ret;
++
++      if (!kref_get_unless_zero(&subreq->wb_kref))
++              return 0;
++      while (!nfs_lock_request(subreq)) {
++              nfs_page_group_unlock(head);
++              ret = nfs_wait_on_request(subreq);
++              if (!ret)
++                      ret = nfs_page_group_lock(head);
++              if (ret < 0) {
++                      nfs_unroll_locks(head, subreq);
++                      nfs_release_request(subreq);
++                      return ret;
++              }
++      }
++      return 0;
++}
++
++/*
+  * nfs_lock_and_join_requests - join all subreqs to the head req
+  * @page: the page used to lookup the "page group" of nfs_page structures
+  *
+@@ -566,7 +617,7 @@ static struct nfs_page *
+ nfs_lock_and_join_requests(struct page *page)
+ {
+       struct inode *inode = page_file_mapping(page)->host;
+-      struct nfs_page *head;
++      struct nfs_page *head, *subreq;
+       int ret;
+       /*
+@@ -578,16 +629,27 @@ nfs_lock_and_join_requests(struct page *
+       if (IS_ERR_OR_NULL(head))
+               return head;
++      ret = nfs_page_group_lock(head);
++      if (ret < 0)
++              goto out_unlock;
++
+       /* lock each request in the page group */
+-      ret = nfs_page_group_lock_subrequests(head);
+-      if (ret < 0) {
+-              nfs_unlock_and_release_request(head);
+-              return ERR_PTR(ret);
++      for (subreq = head->wb_this_page;
++           subreq != head;
++           subreq = subreq->wb_this_page) {
++              ret = nfs_page_group_lock_subreq(head, subreq);
++              if (ret < 0)
++                      goto out_unlock;
+       }
+-      nfs_join_page_group(head, inode);
++      nfs_page_group_unlock(head);
++      nfs_join_page_group(head, inode);
+       return head;
++
++out_unlock:
++      nfs_unlock_and_release_request(head);
++      return ERR_PTR(ret);
+ }
+ static void nfs_write_error(struct nfs_page *req, int error)
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -144,7 +144,6 @@ extern  int nfs_wait_on_request(struct n
+ extern        void nfs_unlock_request(struct nfs_page *req);
+ extern        void nfs_unlock_and_release_request(struct nfs_page *);
+ extern        struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+-extern        int nfs_page_group_lock_subrequests(struct nfs_page *head);
+ extern        void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
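
After the fold, nfs_lock_and_join_requests() handles errors with the kernel's single-exit goto idiom. The following is a condensed restatement of the resulting control flow (an outline of the code above, not new logic); note that nfs_page_group_lock_subreq() already drops the group lock on failure, so the out_unlock path only has to release the head request.

	static struct nfs_page *nfs_lock_and_join_requests_outline(struct page *page)
	{
		struct nfs_page *head, *subreq;
		int ret;

		head = nfs_page_find_head_request(page);	/* may be NULL or ERR_PTR */
		if (IS_ERR_OR_NULL(head))
			return head;

		ret = nfs_page_group_lock(head);
		if (ret < 0)
			goto out_unlock;

		for (subreq = head->wb_this_page; subreq != head;
		     subreq = subreq->wb_this_page) {
			/* on failure this helper returns with the group lock dropped */
			ret = nfs_page_group_lock_subreq(head, subreq);
			if (ret < 0)
				goto out_unlock;
		}

		nfs_page_group_unlock(head);
		nfs_join_page_group(head, page_file_mapping(page)->host);
		return head;

	out_unlock:
		nfs_unlock_and_release_request(head);	/* single cleanup point */
		return ERR_PTR(ret);
	}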
diff --git a/queue-6.1/series b/queue-6.1/series
index 25f2e86de9471d3951ec4ebf541b3f23cabc85c8..522cbe2ac5bd130c9dd4669b46a94e340e7f023f 100644 (file)
@@ -6,3 +6,7 @@ scsi-core-sysfs-correct-sysfs-attributes-access-righ.patch
 smb-client-fix-race-with-concurrent-opens-in-unlink-.patch
 smb-client-fix-race-with-concurrent-opens-in-rename-.patch
 asoc-codecs-tx-macro-correct-tx_macro_component_drv-.patch
+acpi-ec-add-device-to-acpi_ec_no_wakeup-quirk-list.patch
+nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
+nfs-fix-a-race-when-updating-an-existing-write.patch
+vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch
diff --git a/queue-6.1/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch b/queue-6.1/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch
new file mode 100644 (file)
index 0000000..1106920
--- /dev/null
@@ -0,0 +1,79 @@
+From dd54bcf86c91a4455b1f95cbc8e9ac91205f3193 Mon Sep 17 00:00:00 2001
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+Date: Tue, 5 Aug 2025 16:09:17 +0300
+Subject: vhost/net: Protect ubufs with rcu read lock in vhost_net_ubuf_put()
+
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+
+commit dd54bcf86c91a4455b1f95cbc8e9ac91205f3193 upstream.
+
+When operating on struct vhost_net_ubuf_ref, the following execution
+sequence is theoretically possible:
+CPU0 is finalizing DMA operation                   CPU1 is doing VHOST_NET_SET_BACKEND
+                             // ubufs->refcount == 2
+vhost_net_ubuf_put()                               vhost_net_ubuf_put_wait_and_free(oldubufs)
+                                                     vhost_net_ubuf_put_and_wait()
+                                                       vhost_net_ubuf_put()
+                                                         int r = atomic_sub_return(1, &ubufs->refcount);
+                                                         // r = 1
+int r = atomic_sub_return(1, &ubufs->refcount);
+// r = 0
+                                                      wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+                                                      // no wait occurs here because condition is already true
+                                                    kfree(ubufs);
+if (unlikely(!r))
+  wake_up(&ubufs->wait);  // use-after-free
+
+This leads to use-after-free on ubufs access. This happens because CPU1
+skips waiting for wake_up() when refcount is already zero.
+
+To prevent that, use a read-side RCU critical section in vhost_net_ubuf_put(),
+as suggested by Hillf Danton. For this lock to take effect, free ubufs with
+kfree_rcu().
+
+Cc: stable@vger.kernel.org
+Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock")
+Reported-by: Andrey Ryabinin <arbn@yandex-team.com>
+Suggested-by: Hillf Danton <hdanton@sina.com>
+Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
+Message-Id: <20250805130917.727332-1-kniv@yandex-team.ru>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -95,6 +95,7 @@ struct vhost_net_ubuf_ref {
+       atomic_t refcount;
+       wait_queue_head_t wait;
+       struct vhost_virtqueue *vq;
++      struct rcu_head rcu;
+ };
+ #define VHOST_NET_BATCH 64
+@@ -248,9 +249,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqu
+ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+ {
+-      int r = atomic_sub_return(1, &ubufs->refcount);
++      int r;
++
++      rcu_read_lock();
++      r = atomic_sub_return(1, &ubufs->refcount);
+       if (unlikely(!r))
+               wake_up(&ubufs->wait);
++      rcu_read_unlock();
+       return r;
+ }
+@@ -263,7 +268,7 @@ static void vhost_net_ubuf_put_and_wait(
+ static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+ {
+       vhost_net_ubuf_put_and_wait(ubufs);
+-      kfree(ubufs);
++      kfree_rcu(ubufs, rcu);
+ }
+ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
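
To see why this small change is sufficient, here is the fixed put path again with the reasoning annotated (a restatement of the hunks above, not additional code). The waiter now frees ubufs with kfree_rcu(), so the object must stay valid until every concurrent RCU reader, including a putter that has dropped the last reference but not yet returned, leaves its critical section.

	static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
	{
		int r;

		rcu_read_lock();	/* pins ubufs past the final decrement */
		r = atomic_sub_return(1, &ubufs->refcount);
		if (unlikely(!r))
			/* Safe even if the waiter has already seen refcount == 0:
			 * kfree_rcu() defers the free until rcu_read_unlock(). */
			wake_up(&ubufs->wait);
		rcu_read_unlock();
		return r;
	}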