6.6-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 29 Aug 2025 14:23:17 +0000 (16:23 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 29 Aug 2025 14:23:17 +0000 (16:23 +0200)
added patches:
acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch
nfs-fix-a-race-when-updating-an-existing-write.patch
nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch

queue-6.6/acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch [new file with mode: 0644]
queue-6.6/nfs-fix-a-race-when-updating-an-existing-write.patch [new file with mode: 0644]
queue-6.6/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch [new file with mode: 0644]

diff --git a/queue-6.6/acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch b/queue-6.6/acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch
new file mode 100644
index 0000000..166a074
--- /dev/null
+++ b/queue-6.6/acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch
@@ -0,0 +1,35 @@
+From 9cd51eefae3c871440b93c03716c5398f41bdf78 Mon Sep 17 00:00:00 2001
+From: Werner Sembach <wse@tuxedocomputers.com>
+Date: Thu, 8 May 2025 13:16:18 +0200
+Subject: ACPI: EC: Add device to acpi_ec_no_wakeup[] qurik list
+
+From: Werner Sembach <wse@tuxedocomputers.com>
+
+commit 9cd51eefae3c871440b93c03716c5398f41bdf78 upstream.
+
+Add the TUXEDO InfinityBook Pro AMD Gen9 to the acpi_ec_no_wakeup[]
+quirk list to prevent spurious wakeups.
+
+Signed-off-by: Werner Sembach <wse@tuxedocomputers.com>
+Link: https://patch.msgid.link/20250508111625.12149-1-wse@tuxedocomputers.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/ec.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2329,6 +2329,12 @@ static const struct dmi_system_id acpi_e
+                       DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+               }
+       },
++      {
++              // TUXEDO InfinityBook Pro AMD Gen9
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++              },
++      },
+       { },
+ };
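
For context, the quirk above only takes effect because drivers/acpi/ec.c consults the DMI table at initialization time. Below is a minimal sketch of that pattern (kernel context assumed; the function name ec_apply_dmi_quirks() and the exact flag wiring are illustrative, not the verbatim ec.c code):

    #include <linux/dmi.h>
    #include <linux/types.h>

    static bool ec_no_wakeup;   /* when true, the EC is not armed for wakeup in suspend */

    static const struct dmi_system_id ec_no_wakeup_quirks[] = {
            {
                    /* TUXEDO InfinityBook Pro AMD Gen9 */
                    .matches = {
                            DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
                    },
            },
            { }     /* zeroed terminator ends the table */
    };

    static void ec_apply_dmi_quirks(void)
    {
            /* dmi_check_system() returns the number of entries that match */
            if (dmi_check_system(ec_no_wakeup_quirks))
                    ec_no_wakeup = true;
    }
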
diff --git a/queue-6.6/nfs-fix-a-race-when-updating-an-existing-write.patch b/queue-6.6/nfs-fix-a-race-when-updating-an-existing-write.patch
new file mode 100644
index 0000000..3d88606
--- /dev/null
+++ b/queue-6.6/nfs-fix-a-race-when-updating-an-existing-write.patch
@@ -0,0 +1,186 @@
+From 76d2e3890fb169168c73f2e4f8375c7cc24a765e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sat, 16 Aug 2025 07:25:20 -0700
+Subject: NFS: Fix a race when updating an existing write
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 76d2e3890fb169168c73f2e4f8375c7cc24a765e upstream.
+
+After nfs_lock_and_join_requests() tests for whether the request is
+still attached to the mapping, nothing prevents a call to
+nfs_inode_remove_request() from succeeding until we actually lock the
+page group.
+The reason is that whoever called nfs_inode_remove_request() doesn't
+necessarily have a lock on the page group head.
+
+So in order to avoid races, let's take the page group lock earlier in
+nfs_lock_and_join_requests(), and hold it across the removal of the
+request in nfs_inode_remove_request().
+
+Reported-by: Jeff Layton <jlayton@kernel.org>
+Tested-by: Joe Quanaim <jdq@meta.com>
+Tested-by: Andrew Steffen <aksteffen@meta.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Fixes: bd37d6fce184 ("NFSv4: Convert nfs_lock_and_join_requests() to use nfs_page_find_head_request()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pagelist.c        |    9 +++--
+ fs/nfs/write.c           |   71 ++++++++++++++++-------------------------------
+ include/linux/nfs_page.h |    1 +
+ 3 files changed, 31 insertions(+), 50 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -272,13 +272,14 @@ nfs_page_group_unlock(struct nfs_page *r
+       nfs_page_clear_headlock(req);
+ }
+-/*
+- * nfs_page_group_sync_on_bit_locked
++/**
++ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
++ * @req: request in page group
++ * @bit: PG_* bit that is used to sync page group
+  *
+  * must be called with page group lock held
+  */
+-static bool
+-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
++bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+ {
+       struct nfs_page *head = req->wb_head;
+       struct nfs_page *tmp;
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -156,20 +156,10 @@ nfs_page_set_inode_ref(struct nfs_page *
+       }
+ }
+-static int
+-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
++static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+ {
+-      int ret;
+-
+-      if (!test_bit(PG_REMOVE, &req->wb_flags))
+-              return 0;
+-      ret = nfs_page_group_lock(req);
+-      if (ret)
+-              return ret;
+       if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+               nfs_page_set_inode_ref(req, inode);
+-      nfs_page_group_unlock(req);
+-      return 0;
+ }
+ static struct nfs_page *nfs_folio_private_request(struct folio *folio)
+@@ -238,36 +228,6 @@ static struct nfs_page *nfs_folio_find_h
+       return req;
+ }
+-static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
+-{
+-      struct inode *inode = folio_file_mapping(folio)->host;
+-      struct nfs_page *req, *head;
+-      int ret;
+-
+-      for (;;) {
+-              req = nfs_folio_find_head_request(folio);
+-              if (!req)
+-                      return req;
+-              head = nfs_page_group_lock_head(req);
+-              if (head != req)
+-                      nfs_release_request(req);
+-              if (IS_ERR(head))
+-                      return head;
+-              ret = nfs_cancel_remove_inode(head, inode);
+-              if (ret < 0) {
+-                      nfs_unlock_and_release_request(head);
+-                      return ERR_PTR(ret);
+-              }
+-              /* Ensure that nobody removed the request before we locked it */
+-              if (head == nfs_folio_private_request(folio))
+-                      break;
+-              if (folio_test_swapcache(folio))
+-                      break;
+-              nfs_unlock_and_release_request(head);
+-      }
+-      return head;
+-}
+-
+ /* Adjust the file length if we're writing beyond the end */
+ static void nfs_grow_file(struct folio *folio, unsigned int offset,
+                         unsigned int count)
+@@ -621,20 +581,37 @@ static struct nfs_page *nfs_lock_and_joi
+       struct nfs_commit_info cinfo;
+       int ret;
+-      nfs_init_cinfo_from_inode(&cinfo, inode);
+       /*
+        * A reference is taken only on the head request which acts as a
+        * reference to the whole page group - the group will not be destroyed
+        * until the head reference is released.
+        */
+-      head = nfs_folio_find_and_lock_request(folio);
+-      if (IS_ERR_OR_NULL(head))
+-              return head;
++retry:
++      head = nfs_folio_find_head_request(folio);
++      if (!head)
++              return NULL;
++
++      while (!nfs_lock_request(head)) {
++              ret = nfs_wait_on_request(head);
++              if (ret < 0) {
++                      nfs_release_request(head);
++                      return ERR_PTR(ret);
++              }
++      }
+       ret = nfs_page_group_lock(head);
+       if (ret < 0)
+               goto out_unlock;
++      /* Ensure that nobody removed the request before we locked it */
++      if (head != folio->private && !folio_test_swapcache(folio)) {
++              nfs_page_group_unlock(head);
++              nfs_unlock_and_release_request(head);
++              goto retry;
++      }
++
++      nfs_cancel_remove_inode(head, inode);
++
+       /* lock each request in the page group */
+       for (subreq = head->wb_this_page;
+            subreq != head;
+@@ -855,7 +832,8 @@ static void nfs_inode_remove_request(str
+ {
+       struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+-      if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
++      nfs_page_group_lock(req);
++      if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
+               struct folio *folio = nfs_page_to_folio(req->wb_head);
+               struct address_space *mapping = folio_file_mapping(folio);
+@@ -867,6 +845,7 @@ static void nfs_inode_remove_request(str
+               }
+               spin_unlock(&mapping->private_lock);
+       }
++      nfs_page_group_unlock(req);
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
+               atomic_long_dec(&nfsi->nrequests);
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -162,6 +162,7 @@ extern void nfs_join_page_group(struct n
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
+ extern        int nfs_page_set_headlock(struct nfs_page *req);
+ extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
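
The heart of the fix is an ordering rule: the "is this request still attached to the folio?" test is only meaningful while the page group lock is held, because nfs_inode_remove_request() now detaches requests under that same lock. A condensed sketch of the retry loop the patch introduces follows (fs/nfs/write.c context assumed; lock_and_validate() is a hypothetical wrapper name, the calls are the ones used above):

    static struct nfs_page *lock_and_validate(struct folio *folio)
    {
            struct nfs_page *head;
            int ret;

    retry:
            head = nfs_folio_find_head_request(folio);      /* takes a reference */
            if (!head)
                    return NULL;

            while (!nfs_lock_request(head)) {               /* lock the head request... */
                    ret = nfs_wait_on_request(head);
                    if (ret < 0) {
                            nfs_release_request(head);
                            return ERR_PTR(ret);
                    }
            }

            ret = nfs_page_group_lock(head);                /* ...then the page group */
            if (ret < 0) {
                    nfs_unlock_and_release_request(head);
                    return ERR_PTR(ret);
            }

            /* Only now is this check race-free against nfs_inode_remove_request() */
            if (head != folio->private && !folio_test_swapcache(folio)) {
                    nfs_page_group_unlock(head);
                    nfs_unlock_and_release_request(head);
                    goto retry;
            }
            return head;    /* caller drops the group lock when done */
    }
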
diff --git a/queue-6.6/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch b/queue-6.6/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
new file mode 100644
index 0000000..d37f8e0
--- /dev/null
+++ b/queue-6.6/nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
@@ -0,0 +1,223 @@
+From 25edbcac6e32eab345e470d56ca9974a577b878b Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 1 Jul 2024 07:26:52 +0200
+Subject: nfs: fold nfs_page_group_lock_subrequests into nfs_lock_and_join_requests
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 25edbcac6e32eab345e470d56ca9974a577b878b upstream.
+
+Fold nfs_page_group_lock_subrequests into nfs_lock_and_join_requests to
+prepare for future changes to this code, and move the helpers to write.c
+as well.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pagelist.c        |   77 -----------------------------------------------
+ fs/nfs/write.c           |   75 ++++++++++++++++++++++++++++++++++++++++++---
+ include/linux/nfs_page.h |    1 -
+ 3 files changed, 69 insertions(+), 84 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -207,83 +207,6 @@ nfs_page_group_lock_head(struct nfs_page
+ }
+ /*
+- * nfs_unroll_locks -  unlock all newly locked reqs and wait on @req
+- * @head: head request of page group, must be holding head lock
+- * @req: request that couldn't lock and needs to wait on the req bit lock
+- *
+- * This is a helper function for nfs_lock_and_join_requests
+- * returns 0 on success, < 0 on error.
+- */
+-static void
+-nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+-{
+-      struct nfs_page *tmp;
+-
+-      /* relinquish all the locks successfully grabbed this run */
+-      for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+-              if (!kref_read(&tmp->wb_kref))
+-                      continue;
+-              nfs_unlock_and_release_request(tmp);
+-      }
+-}
+-
+-/*
+- * nfs_page_group_lock_subreq -  try to lock a subrequest
+- * @head: head request of page group
+- * @subreq: request to lock
+- *
+- * This is a helper function for nfs_lock_and_join_requests which
+- * must be called with the head request and page group both locked.
+- * On error, it returns with the page group unlocked.
+- */
+-static int
+-nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
+-{
+-      int ret;
+-
+-      if (!kref_get_unless_zero(&subreq->wb_kref))
+-              return 0;
+-      while (!nfs_lock_request(subreq)) {
+-              nfs_page_group_unlock(head);
+-              ret = nfs_wait_on_request(subreq);
+-              if (!ret)
+-                      ret = nfs_page_group_lock(head);
+-              if (ret < 0) {
+-                      nfs_unroll_locks(head, subreq);
+-                      nfs_release_request(subreq);
+-                      return ret;
+-              }
+-      }
+-      return 0;
+-}
+-
+-/*
+- * nfs_page_group_lock_subrequests -  try to lock the subrequests
+- * @head: head request of page group
+- *
+- * This is a helper function for nfs_lock_and_join_requests which
+- * must be called with the head request locked.
+- */
+-int nfs_page_group_lock_subrequests(struct nfs_page *head)
+-{
+-      struct nfs_page *subreq;
+-      int ret;
+-
+-      ret = nfs_page_group_lock(head);
+-      if (ret < 0)
+-              return ret;
+-      /* lock each request in the page group */
+-      for (subreq = head->wb_this_page; subreq != head;
+-                      subreq = subreq->wb_this_page) {
+-              ret = nfs_page_group_lock_subreq(head, subreq);
+-              if (ret < 0)
+-                      return ret;
+-      }
+-      nfs_page_group_unlock(head);
+-      return 0;
+-}
+-
+-/*
+  * nfs_page_set_headlock - set the request PG_HEADLOCK
+  * @req: request that is to be locked
+  *
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -549,6 +549,57 @@ void nfs_join_page_group(struct nfs_page
+ }
+ /*
++ * nfs_unroll_locks -  unlock all newly locked reqs and wait on @req
++ * @head: head request of page group, must be holding head lock
++ * @req: request that couldn't lock and needs to wait on the req bit lock
++ *
++ * This is a helper function for nfs_lock_and_join_requests
++ * returns 0 on success, < 0 on error.
++ */
++static void
++nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
++{
++      struct nfs_page *tmp;
++
++      /* relinquish all the locks successfully grabbed this run */
++      for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
++              if (!kref_read(&tmp->wb_kref))
++                      continue;
++              nfs_unlock_and_release_request(tmp);
++      }
++}
++
++/*
++ * nfs_page_group_lock_subreq -  try to lock a subrequest
++ * @head: head request of page group
++ * @subreq: request to lock
++ *
++ * This is a helper function for nfs_lock_and_join_requests which
++ * must be called with the head request and page group both locked.
++ * On error, it returns with the page group unlocked.
++ */
++static int
++nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
++{
++      int ret;
++
++      if (!kref_get_unless_zero(&subreq->wb_kref))
++              return 0;
++      while (!nfs_lock_request(subreq)) {
++              nfs_page_group_unlock(head);
++              ret = nfs_wait_on_request(subreq);
++              if (!ret)
++                      ret = nfs_page_group_lock(head);
++              if (ret < 0) {
++                      nfs_unroll_locks(head, subreq);
++                      nfs_release_request(subreq);
++                      return ret;
++              }
++      }
++      return 0;
++}
++
++/*
+  * nfs_lock_and_join_requests - join all subreqs to the head req
+  * @folio: the folio used to lookup the "page group" of nfs_page structures
+  *
+@@ -566,7 +617,7 @@ void nfs_join_page_group(struct nfs_page
+ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ {
+       struct inode *inode = folio_file_mapping(folio)->host;
+-      struct nfs_page *head;
++      struct nfs_page *head, *subreq;
+       struct nfs_commit_info cinfo;
+       int ret;
+@@ -580,16 +631,28 @@ static struct nfs_page *nfs_lock_and_joi
+       if (IS_ERR_OR_NULL(head))
+               return head;
++      ret = nfs_page_group_lock(head);
++      if (ret < 0)
++              goto out_unlock;
++
+       /* lock each request in the page group */
+-      ret = nfs_page_group_lock_subrequests(head);
+-      if (ret < 0) {
+-              nfs_unlock_and_release_request(head);
+-              return ERR_PTR(ret);
++      for (subreq = head->wb_this_page;
++           subreq != head;
++           subreq = subreq->wb_this_page) {
++              ret = nfs_page_group_lock_subreq(head, subreq);
++              if (ret < 0)
++                      goto out_unlock;
+       }
+-      nfs_join_page_group(head, &cinfo, inode);
++      nfs_page_group_unlock(head);
++      nfs_init_cinfo_from_inode(&cinfo, inode);
++      nfs_join_page_group(head, &cinfo, inode);
+       return head;
++
++out_unlock:
++      nfs_unlock_and_release_request(head);
++      return ERR_PTR(ret);
+ }
+ static void nfs_write_error(struct nfs_page *req, int error)
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -156,7 +156,6 @@ extern  int nfs_wait_on_request(struct n
+ extern        void nfs_unlock_request(struct nfs_page *req);
+ extern        void nfs_unlock_and_release_request(struct nfs_page *);
+ extern        struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+-extern        int nfs_page_group_lock_subrequests(struct nfs_page *head);
+ extern void nfs_join_page_group(struct nfs_page *head,
+                               struct nfs_commit_info *cinfo,
+                               struct inode *inode);
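
The net effect of the fold is that nfs_lock_and_join_requests() now owns the whole locking sequence itself. A condensed sketch of the resulting control flow (fs/nfs/write.c context assumed; join_subrequests() is a hypothetical name, and the error convention comes from the helper's comment, which leaves the group lock dropped on failure):

    static struct nfs_page *join_subrequests(struct nfs_page *head)
    {
            struct nfs_page *subreq;
            int ret;

            ret = nfs_page_group_lock(head);
            if (ret < 0)
                    goto out_unlock;

            /* lock each request in the page group */
            for (subreq = head->wb_this_page; subreq != head;
                 subreq = subreq->wb_this_page) {
                    ret = nfs_page_group_lock_subreq(head, subreq);
                    if (ret < 0)
                            goto out_unlock;        /* group lock already dropped */
            }

            nfs_page_group_unlock(head);
            return head;

    out_unlock:
            nfs_unlock_and_release_request(head);
            return ERR_PTR(ret);
    }
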
diff --git a/queue-6.6/series b/queue-6.6/series
index c7c38c3096922738f0a0247de7eed92b67d9389c..c20bf8bd678bcbf4afe705306fdc21228b990468 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -10,3 +10,7 @@ smb-client-fix-race-with-concurrent-opens-in-unlink-.patch
 smb-client-fix-race-with-concurrent-opens-in-rename-.patch
 asoc-codecs-tx-macro-correct-tx_macro_component_drv-.patch
 erofs-fix-atomic-context-detection-when-config_debug.patch
+acpi-ec-add-device-to-acpi_ec_no_wakeup-qurik-list.patch
+nfs-fold-nfs_page_group_lock_subrequests-into-nfs_lock_and_join_requests.patch
+nfs-fix-a-race-when-updating-an-existing-write.patch
+vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch
diff --git a/queue-6.6/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch b/queue-6.6/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch
new file mode 100644
index 0000000..6a1d7c0
--- /dev/null
+++ b/queue-6.6/vhost-net-protect-ubufs-with-rcu-read-lock-in-vhost_net_ubuf_put.patch
@@ -0,0 +1,79 @@
+From dd54bcf86c91a4455b1f95cbc8e9ac91205f3193 Mon Sep 17 00:00:00 2001
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+Date: Tue, 5 Aug 2025 16:09:17 +0300
+Subject: vhost/net: Protect ubufs with rcu read lock in vhost_net_ubuf_put()
+
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+
+commit dd54bcf86c91a4455b1f95cbc8e9ac91205f3193 upstream.
+
+When operating on struct vhost_net_ubuf_ref, the following execution
+sequence is theoretically possible:
+CPU0 is finalizing DMA operation                   CPU1 is doing VHOST_NET_SET_BACKEND
+                             // ubufs->refcount == 2
+vhost_net_ubuf_put()                               vhost_net_ubuf_put_wait_and_free(oldubufs)
+                                                     vhost_net_ubuf_put_and_wait()
+                                                       vhost_net_ubuf_put()
+                                                         int r = atomic_sub_return(1, &ubufs->refcount);
+                                                         // r = 1
+int r = atomic_sub_return(1, &ubufs->refcount);
+// r = 0
+                                                      wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+                                                      // no wait occurs here because condition is already true
+                                                    kfree(ubufs);
+if (unlikely(!r))
+  wake_up(&ubufs->wait);  // use-after-free
+
+This leads to use-after-free on ubufs access. This happens because CPU1
+skips waiting for wake_up() when refcount is already zero.
+
+To prevent that use a read-side RCU critical section in vhost_net_ubuf_put(),
+as suggested by Hillf Danton. For this lock to take effect, free ubufs with
+kfree_rcu().
+
+Cc: stable@vger.kernel.org
+Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock")
+Reported-by: Andrey Ryabinin <arbn@yandex-team.com>
+Suggested-by: Hillf Danton <hdanton@sina.com>
+Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
+Message-Id: <20250805130917.727332-1-kniv@yandex-team.ru>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref {
+       atomic_t refcount;
+       wait_queue_head_t wait;
+       struct vhost_virtqueue *vq;
++      struct rcu_head rcu;
+ };
+ #define VHOST_NET_BATCH 64
+@@ -249,9 +250,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqu
+ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+ {
+-      int r = atomic_sub_return(1, &ubufs->refcount);
++      int r;
++
++      rcu_read_lock();
++      r = atomic_sub_return(1, &ubufs->refcount);
+       if (unlikely(!r))
+               wake_up(&ubufs->wait);
++      rcu_read_unlock();
+       return r;
+ }
+@@ -264,7 +269,7 @@ static void vhost_net_ubuf_put_and_wait(
+ static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+ {
+       vhost_net_ubuf_put_and_wait(ubufs);
+-      kfree(ubufs);
++      kfree_rcu(ubufs, rcu);
+ }
+ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
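
The shape of this fix generalizes to any wake_up()-after-refcount-drop pattern: the RCU read-side critical section pins the object across the atomic_sub_return()/wake_up() window, and the freeing side must go through kfree_rcu() so a grace period elapses before the memory is reused. A minimal sketch of the pattern (kernel context assumed; struct ref_obj and the function names are illustrative, not the vhost code):

    #include <linux/atomic.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/wait.h>

    struct ref_obj {
            atomic_t refcount;
            wait_queue_head_t wait;
            struct rcu_head rcu;
    };

    static int ref_put(struct ref_obj *obj)
    {
            int r;

            rcu_read_lock();                /* pins obj until rcu_read_unlock() */
            r = atomic_sub_return(1, &obj->refcount);
            if (unlikely(!r))
                    wake_up(&obj->wait);    /* waiter may already call kfree_rcu()... */
            rcu_read_unlock();              /* ...but obj cannot be freed before here */
            return r;
    }

    static void ref_put_wait_and_free(struct ref_obj *obj)
    {
            ref_put(obj);                   /* drop the caller's own reference */
            wait_event(obj->wait, !atomic_read(&obj->refcount));
            kfree_rcu(obj, rcu);            /* defer the free past all RCU readers */
    }
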