git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
authorSasha Levin <sashal@kernel.org>
Tue, 8 Jul 2025 00:37:07 +0000 (20:37 -0400)
committerSasha Levin <sashal@kernel.org>
Tue, 8 Jul 2025 00:37:07 +0000 (20:37 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
19 files changed:
queue-5.10/btrfs-propagate-last_unlink_trans-earlier-when-doing.patch [new file with mode: 0644]
queue-5.10/btrfs-use-btrfs_record_snapshot_destroy-during-rmdir.patch [new file with mode: 0644]
queue-5.10/dpaa2-eth-fix-xdp_rxq_info-leak.patch [new file with mode: 0644]
queue-5.10/dpaa2-eth-rename-dpaa2_eth_xdp_release_buf-into-dpaa.patch [new file with mode: 0644]
queue-5.10/dpaa2-eth-update-dpni_get_single_step_cfg-command.patch [new file with mode: 0644]
queue-5.10/dpaa2-eth-update-single_step-register-access.patch [new file with mode: 0644]
queue-5.10/drm-v3d-disable-interrupts-before-resetting-the-gpu.patch [new file with mode: 0644]
queue-5.10/flexfiles-pnfs-update-stats-on-nfs4err_delay-for-v4..patch [new file with mode: 0644]
queue-5.10/mmc-mediatek-use-data-instead-of-mrq-parameter-from-.patch [new file with mode: 0644]
queue-5.10/mtk-sd-prevent-memory-corruption-from-dma-map-failur.patch [new file with mode: 0644]
queue-5.10/mtk-sd-reset-host-mrq-on-prepare_data-error.patch [new file with mode: 0644]
queue-5.10/net-dpaa2-eth-rearrange-variable-in-dpaa2_eth_get_et.patch [new file with mode: 0644]
queue-5.10/nfsv4-flexfiles-fix-handling-of-nfs-level-errors-in-.patch [new file with mode: 0644]
queue-5.10/rdma-mlx5-fix-vport-loopback-for-mpv-device.patch [new file with mode: 0644]
queue-5.10/regulator-gpio-fix-the-out-of-bounds-access-to-drvda.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/virtio-net-ensure-the-received-length-does-not-excee.patch [new file with mode: 0644]
queue-5.10/virtio_ring-introduce-dma-map-api-for-virtqueue.patch [new file with mode: 0644]
queue-5.10/virtio_ring-introduce-dma-sync-api-for-virtqueue.patch [new file with mode: 0644]

diff --git a/queue-5.10/btrfs-propagate-last_unlink_trans-earlier-when-doing.patch b/queue-5.10/btrfs-propagate-last_unlink_trans-earlier-when-doing.patch
new file mode 100644 (file)
index 0000000..f010428
--- /dev/null
@@ -0,0 +1,101 @@
+From 5934d2c4aa669134bae7d89232ec480a3dbc9ecb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jun 2025 15:54:05 +0100
+Subject: btrfs: propagate last_unlink_trans earlier when doing a rmdir
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit c466e33e729a0ee017d10d919cba18f503853c60 ]
+
+In case the removed directory had a snapshot that was deleted, we are
+propagating its inode's last_unlink_trans to the parent directory after
+we removed the entry from the parent directory. This leaves a small race
+window where someone can log the parent directory after we removed the
+entry and before we updated last_unlink_trans, and as a result if we ever
+try to replay such a log tree, we will fail since we will attempt to
+remove a snapshot during log replay, which is currently not possible and
+results in the log replay (and mount) to fail. This is the type of failure
+described in commit 1ec9a1ae1e30 ("Btrfs: fix unreplayable log after
+snapshot delete + parent dir fsync").
+
+So fix this by propagating the last_unlink_trans to the parent directory
+before we remove the entry from it.
+
+Fixes: 44f714dae50a ("Btrfs: improve performance on fsync against new inode after rename/unlink")
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8d7ca8a21525a..94a338de3a8e9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4150,7 +4150,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+       int err = 0;
+       struct btrfs_root *root = BTRFS_I(dir)->root;
+       struct btrfs_trans_handle *trans;
+-      u64 last_unlink_trans;
+       if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+               return -ENOTEMPTY;
+@@ -4161,6 +4160,23 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
++      /*
++       * Propagate the last_unlink_trans value of the deleted dir to its
++       * parent directory. This is to prevent an unrecoverable log tree in the
++       * case we do something like this:
++       * 1) create dir foo
++       * 2) create snapshot under dir foo
++       * 3) delete the snapshot
++       * 4) rmdir foo
++       * 5) mkdir foo
++       * 6) fsync foo or some file inside foo
++       *
++       * This is because we can't unlink other roots when replaying the dir
++       * deletes for directory foo.
++       */
++      if (BTRFS_I(inode)->last_unlink_trans >= trans->transid)
++              BTRFS_I(dir)->last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
++
+       if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+               err = btrfs_unlink_subvol(trans, dir, dentry);
+               goto out;
+@@ -4170,28 +4186,12 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+       if (err)
+               goto out;
+-      last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+-
+       /* now the directory is empty */
+       err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
+                       BTRFS_I(d_inode(dentry)), dentry->d_name.name,
+                       dentry->d_name.len);
+-      if (!err) {
++      if (!err)
+               btrfs_i_size_write(BTRFS_I(inode), 0);
+-              /*
+-               * Propagate the last_unlink_trans value of the deleted dir to
+-               * its parent directory. This is to prevent an unrecoverable
+-               * log tree in the case we do something like this:
+-               * 1) create dir foo
+-               * 2) create snapshot under dir foo
+-               * 3) delete the snapshot
+-               * 4) rmdir foo
+-               * 5) mkdir foo
+-               * 6) fsync foo or some file inside foo
+-               */
+-              if (last_unlink_trans >= trans->transid)
+-                      BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+-      }
+ out:
+       btrfs_end_transaction(trans);
+       btrfs_btree_balance_dirty(root->fs_info);
+-- 
+2.39.5
+
diff --git a/queue-5.10/btrfs-use-btrfs_record_snapshot_destroy-during-rmdir.patch b/queue-5.10/btrfs-use-btrfs_record_snapshot_destroy-during-rmdir.patch
new file mode 100644 (file)
index 0000000..8b4cfef
--- /dev/null
@@ -0,0 +1,46 @@
+From b8b5b88c31f83fcde139054a51437ab06973aa73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jun 2025 16:37:01 +0100
+Subject: btrfs: use btrfs_record_snapshot_destroy() during rmdir
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 157501b0469969fc1ba53add5049575aadd79d80 ]
+
+We are setting the parent directory's last_unlink_trans directly which
+may result in a concurrent task starting to log the directory not see the
+update and therefore can log the directory after we removed a child
+directory which had a snapshot within instead of falling back to a
+transaction commit. Replaying such a log tree would result in a mount
+failure since we can't currently delete snapshots (and subvolumes) during
+log replay. This is the type of failure described in commit 1ec9a1ae1e30
+("Btrfs: fix unreplayable log after snapshot delete + parent dir fsync").
+
+Fix this by using btrfs_record_snapshot_destroy() which updates the
+last_unlink_trans field while holding the inode's log_mutex lock.
+
+Fixes: 44f714dae50a ("Btrfs: improve performance on fsync against new inode after rename/unlink")
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 94a338de3a8e9..82805ac91b06c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4175,7 +4175,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+        * deletes for directory foo.
+        */
+       if (BTRFS_I(inode)->last_unlink_trans >= trans->transid)
+-              BTRFS_I(dir)->last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
++              btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
+       if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+               err = btrfs_unlink_subvol(trans, dir, dentry);
+-- 
+2.39.5
+
diff --git a/queue-5.10/dpaa2-eth-fix-xdp_rxq_info-leak.patch b/queue-5.10/dpaa2-eth-fix-xdp_rxq_info-leak.patch
new file mode 100644 (file)
index 0000000..2da940c
--- /dev/null
@@ -0,0 +1,101 @@
+From 62fb613074ebd697ebcf5b22a841bcd80bcd6100 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Jul 2025 16:06:22 -0400
+Subject: dpaa2-eth: fix xdp_rxq_info leak
+
+From: Fushuai Wang <wangfushuai@baidu.com>
+
+[ Upstream commit 2def09ead4ad5907988b655d1e1454003aaf8297 ]
+
+The driver registered xdp_rxq_info structures via xdp_rxq_info_reg()
+but failed to properly unregister them in error paths and during
+removal.
+
+Fixes: d678be1dc1ec ("dpaa2-eth: add XDP_REDIRECT support")
+Signed-off-by: Fushuai Wang <wangfushuai@baidu.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250626133003.80136-1-wangfushuai@baidu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/freescale/dpaa2/dpaa2-eth.c  | 26 +++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index ef5356ac604ca..776f624e3b8ee 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -3425,6 +3425,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+                                        MEM_TYPE_PAGE_ORDER0, NULL);
+       if (err) {
+               dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
++              xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
+               return err;
+       }
+@@ -3917,17 +3918,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
+                       return -EINVAL;
+               }
+               if (err)
+-                      return err;
++                      goto out;
+       }
+       err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+                           DPNI_QUEUE_TX, &priv->tx_qdid);
+       if (err) {
+               dev_err(dev, "dpni_get_qdid() failed\n");
+-              return err;
++              goto out;
+       }
+       return 0;
++
++out:
++      while (i--) {
++              if (priv->fq[i].type == DPAA2_RX_FQ &&
++                  xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
++                      xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
++      }
++      return err;
+ }
+ /* Allocate rings for storing incoming frame descriptors */
+@@ -4264,6 +4273,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
+       }
+ }
++static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
++{
++      int i;
++
++      for (i = 0; i < priv->num_fqs; i++) {
++              if (priv->fq[i].type == DPAA2_RX_FQ &&
++                  xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
++                      xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
++      }
++}
++
+ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ {
+       struct device *dev;
+@@ -4450,6 +4470,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+       free_percpu(priv->percpu_stats);
+ err_alloc_percpu_stats:
+       dpaa2_eth_del_ch_napi(priv);
++      dpaa2_eth_free_rx_xdp_rxq(priv);
+ err_bind:
+       dpaa2_eth_free_dpbp(priv);
+ err_dpbp_setup:
+@@ -4501,6 +4522,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+       free_percpu(priv->percpu_extras);
+       dpaa2_eth_del_ch_napi(priv);
++      dpaa2_eth_free_rx_xdp_rxq(priv);
+       dpaa2_eth_free_dpbp(priv);
+       dpaa2_eth_free_dpio(priv);
+       dpaa2_eth_free_dpni(priv);
+-- 
+2.39.5
+
diff --git a/queue-5.10/dpaa2-eth-rename-dpaa2_eth_xdp_release_buf-into-dpaa.patch b/queue-5.10/dpaa2-eth-rename-dpaa2_eth_xdp_release_buf-into-dpaa.patch
new file mode 100644 (file)
index 0000000..261308d
--- /dev/null
@@ -0,0 +1,127 @@
+From 0c16c7822a651deb7a22deeb9d214445220de97d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Apr 2021 12:55:30 +0300
+Subject: dpaa2-eth: rename dpaa2_eth_xdp_release_buf into
+ dpaa2_eth_recycle_buf
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 28d137cc8c0bd2c9501b8eb0855b631289c7b4a3 ]
+
+Rename the dpaa2_eth_xdp_release_buf function into dpaa2_eth_recycle_buf
+since in the next patches we'll be using the same recycle mechanism for
+the normal stack path beside for XDP_DROP.
+
+Also, rename the array which holds the buffers to be recycled so that it
+does not have any reference to XDP.
+
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 2def09ead4ad ("dpaa2-eth: fix xdp_rxq_info leak")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/freescale/dpaa2/dpaa2-eth.c  | 26 +++++++++----------
+ .../net/ethernet/freescale/dpaa2/dpaa2-eth.h  |  6 +++--
+ 2 files changed, 17 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index fa202fea537f8..fa799cc044426 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -223,31 +223,31 @@ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+       }
+ }
+-static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
+-                                    struct dpaa2_eth_channel *ch,
+-                                    dma_addr_t addr)
++static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
++                                struct dpaa2_eth_channel *ch,
++                                dma_addr_t addr)
+ {
+       int retries = 0;
+       int err;
+-      ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+-      if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
++      ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
++      if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
+               return;
+       while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+-                                             ch->xdp.drop_bufs,
+-                                             ch->xdp.drop_cnt)) == -EBUSY) {
++                                             ch->recycled_bufs,
++                                             ch->recycled_bufs_cnt)) == -EBUSY) {
+               if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+                       break;
+               cpu_relax();
+       }
+       if (err) {
+-              dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+-              ch->buf_count -= ch->xdp.drop_cnt;
++              dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
++              ch->buf_count -= ch->recycled_bufs_cnt;
+       }
+-      ch->xdp.drop_cnt = 0;
++      ch->recycled_bufs_cnt = 0;
+ }
+ static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+@@ -300,7 +300,7 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+               ch->stats.xdp_tx++;
+       }
+       for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+-              dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
++              dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+               percpu_stats->tx_errors++;
+               ch->stats.xdp_tx_err++;
+       }
+@@ -386,7 +386,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+               trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+               fallthrough;
+       case XDP_DROP:
+-              dpaa2_eth_xdp_release_buf(priv, ch, addr);
++              dpaa2_eth_recycle_buf(priv, ch, addr);
+               ch->stats.xdp_drop++;
+               break;
+       case XDP_REDIRECT:
+@@ -407,7 +407,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+                               free_pages((unsigned long)vaddr, 0);
+                       } else {
+                               ch->buf_count++;
+-                              dpaa2_eth_xdp_release_buf(priv, ch, addr);
++                              dpaa2_eth_recycle_buf(priv, ch, addr);
+                       }
+                       ch->stats.xdp_drop++;
+               } else {
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index 2825f53e7e9b1..e143d66ca2474 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -438,8 +438,6 @@ struct dpaa2_eth_fq {
+ struct dpaa2_eth_ch_xdp {
+       struct bpf_prog *prog;
+-      u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+-      int drop_cnt;
+       unsigned int res;
+ };
+@@ -457,6 +455,10 @@ struct dpaa2_eth_channel {
+       struct dpaa2_eth_ch_xdp xdp;
+       struct xdp_rxq_info xdp_rxq;
+       struct list_head *rx_list;
++
++      /* Buffers to be recycled back in the buffer pool */
++      u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
++      int recycled_bufs_cnt;
+ };
+ struct dpaa2_eth_dist_fields {
+-- 
+2.39.5
+
diff --git a/queue-5.10/dpaa2-eth-update-dpni_get_single_step_cfg-command.patch b/queue-5.10/dpaa2-eth-update-dpni_get_single_step_cfg-command.patch
new file mode 100644 (file)
index 0000000..f1f6a74
--- /dev/null
@@ -0,0 +1,100 @@
+From 90e4f12476c0203bbf5dec4ed5a460235e4d94f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Feb 2022 22:22:00 +0200
+Subject: dpaa2-eth: Update dpni_get_single_step_cfg command
+
+From: Radu Bulie <radu-andrei.bulie@nxp.com>
+
+[ Upstream commit 9572594ecf027a2b1828e42c26fb55cbd3219708 ]
+
+dpni_get_single_step_cfg is an MC firmware command used for
+retrieving the contents of SINGLE_STEP 1588 register available
+in a DPMAC.
+
+This patch adds a new version of this command that returns as an extra
+argument the physical base address of the aforementioned register.
+The address will be used to directly modify the contents of the
+SINGLE_STEP register instead of invoking the MC command
+dpni_set_single_step_cgf. The former approach introduced huge delays on
+the TX datapath when one step PTP events were transmitted. This led to low
+throughput and high latencies observed in the PTP correction field.
+
+Signed-off-by: Radu Bulie <radu-andrei.bulie@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 2def09ead4ad ("dpaa2-eth: fix xdp_rxq_info leak")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 6 +++++-
+ drivers/net/ethernet/freescale/dpaa2/dpni.c     | 2 ++
+ drivers/net/ethernet/freescale/dpaa2/dpni.h     | 6 ++++++
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+index 90453dc7baefe..a0dfd25c6bd4a 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+@@ -94,7 +94,7 @@
+ #define DPNI_CMDID_GET_LINK_CFG                               DPNI_CMD(0x278)
+ #define DPNI_CMDID_SET_SINGLE_STEP_CFG                        DPNI_CMD(0x279)
+-#define DPNI_CMDID_GET_SINGLE_STEP_CFG                        DPNI_CMD(0x27a)
++#define DPNI_CMDID_GET_SINGLE_STEP_CFG                        DPNI_CMD_V2(0x27a)
+ /* Macros for accessing command fields smaller than 1byte */
+ #define DPNI_MASK(field)      \
+@@ -654,12 +654,16 @@ struct dpni_cmd_single_step_cfg {
+       __le16 flags;
+       __le16 offset;
+       __le32 peer_delay;
++      __le32 ptp_onestep_reg_base;
++      __le32 pad0;
+ };
+ struct dpni_rsp_single_step_cfg {
+       __le16 flags;
+       __le16 offset;
+       __le32 peer_delay;
++      __le32 ptp_onestep_reg_base;
++      __le32 pad0;
+ };
+ #endif /* _FSL_DPNI_CMD_H */
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
+index 6ea7db66a6322..d248a40fbc3f8 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
+@@ -2037,6 +2037,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+       ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+                                           PTP_CH_UPDATE) ? 1 : 0;
+       ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
++      ptp_cfg->ptp_onestep_reg_base =
++                                le32_to_cpu(rsp_params->ptp_onestep_reg_base);
+       return err;
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
+index e7b9e195b534b..f854450983983 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
+@@ -1096,12 +1096,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+  * @peer_delay:       For peer-to-peer transparent clocks add this value to the
+  *            correction field in addition to the transient time update.
+  *            The value expresses nanoseconds.
++ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
++ *                      is used to update directly the register contents.
++ *                      User has to create an address mapping for it.
++ *
++ *
+  */
+ struct dpni_single_step_cfg {
+       u8      en;
+       u8      ch_update;
+       u16     offset;
+       u32     peer_delay;
++      u32     ptp_onestep_reg_base;
+ };
+ int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+-- 
+2.39.5
+
diff --git a/queue-5.10/dpaa2-eth-update-single_step-register-access.patch b/queue-5.10/dpaa2-eth-update-single_step-register-access.patch
new file mode 100644 (file)
index 0000000..71ab661
--- /dev/null
@@ -0,0 +1,234 @@
+From 9af371acd555fd2e87aa8b1ad0d07233aeffedba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Feb 2022 22:22:01 +0200
+Subject: dpaa2-eth: Update SINGLE_STEP register access
+
+From: Radu Bulie <radu-andrei.bulie@nxp.com>
+
+[ Upstream commit c4680c978567328a696fd2400bbf58a36cff95d1 ]
+
+DPAA2 MAC supports 1588 one step timestamping.
+If this option is enabled then for each transmitted PTP event packet,
+the 1588 SINGLE_STEP register is accessed to modify the following fields:
+
+-offset of the correction field inside the PTP packet
+-UDP checksum update bit,  in case the PTP event packet has
+ UDP encapsulation
+
+These values can change any time, because there may be multiple
+PTP clients connected, that receive various 1588 frame types:
+- L2 only frame
+- UDP / Ipv4
+- UDP / Ipv6
+- other
+
+The current implementation uses dpni_set_single_step_cfg to update the
+SINGLE_STEP register.
+Using an MC command  on the Tx datapath for each transmitted 1588 message
+introduces high delays, leading to low throughput and consequently to a
+small number of supported PTP clients. Besides these, the nanosecond
+correction field from the PTP packet will contain the high delay from the
+driver which together with the originTimestamp will render timestamp
+values that are unacceptable in a GM clock implementation.
+
+This patch updates the Tx datapath for 1588 messages when single step
+timestamp is enabled and provides direct access to SINGLE_STEP register,
+eliminating the  overhead caused by the dpni_set_single_step_cfg
+MC command. MC version >= 10.32 implements this functionality.
+If the MC version does not have support for returning the
+single step register base address, the driver will use
+dpni_set_single_step_cfg command for updates operations.
+
+All the delay introduced by dpni_set_single_step_cfg
+function will be eliminated (if MC version has support for returning the
+base address of the single step register), improving the egress driver
+performance for PTP packets when single step timestamping is enabled.
+
+Before these changes the maximum throughput for 1588 messages with
+single step hardware timestamp enabled was around 2000pps.
+After the updates the throughput increased up to 32.82 Mbps / 46631.02 pps.
+
+Signed-off-by: Radu Bulie <radu-andrei.bulie@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 2def09ead4ad ("dpaa2-eth: fix xdp_rxq_info leak")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/freescale/dpaa2/dpaa2-eth.c  | 89 +++++++++++++++++--
+ .../net/ethernet/freescale/dpaa2/dpaa2-eth.h  | 14 ++-
+ 2 files changed, 93 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index fa799cc044426..ef5356ac604ca 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -34,6 +34,75 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+ struct ptp_qoriq *dpaa2_ptp;
+ EXPORT_SYMBOL(dpaa2_ptp);
++static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
++{
++      priv->features = 0;
++
++      if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
++                                 DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
++              priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
++}
++
++static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
++                                            u32 offset, u8 udp)
++{
++      struct dpni_single_step_cfg cfg;
++
++      cfg.en = 1;
++      cfg.ch_update = udp;
++      cfg.offset = offset;
++      cfg.peer_delay = 0;
++
++      if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
++              WARN_ONCE(1, "Failed to set single step register");
++}
++
++static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
++                                          u32 offset, u8 udp)
++{
++      u32 val = 0;
++
++      val = DPAA2_PTP_SINGLE_STEP_ENABLE |
++             DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
++
++      if (udp)
++              val |= DPAA2_PTP_SINGLE_STEP_CH;
++
++      if (priv->onestep_reg_base)
++              writel(val, priv->onestep_reg_base);
++}
++
++static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
++{
++      struct device *dev = priv->net_dev->dev.parent;
++      struct dpni_single_step_cfg ptp_cfg;
++
++      priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
++
++      if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
++              return;
++
++      if (dpni_get_single_step_cfg(priv->mc_io, 0,
++                                   priv->mc_token, &ptp_cfg)) {
++              dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
++              return;
++      }
++
++      if (!ptp_cfg.ptp_onestep_reg_base) {
++              dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
++              return;
++      }
++
++      priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
++                                       sizeof(u32));
++      if (!priv->onestep_reg_base) {
++              dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
++              return;
++      }
++
++      priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
++}
++
+ static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+                               dma_addr_t iova_addr)
+ {
+@@ -668,7 +737,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+                                      struct sk_buff *skb)
+ {
+       struct ptp_tstamp origin_timestamp;
+-      struct dpni_single_step_cfg cfg;
+       u8 msgtype, twostep, udp;
+       struct dpaa2_faead *faead;
+       struct dpaa2_fas *fas;
+@@ -722,14 +790,12 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+                       htonl(origin_timestamp.sec_lsb);
+               *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+-              cfg.en = 1;
+-              cfg.ch_update = udp;
+-              cfg.offset = offset1;
+-              cfg.peer_delay = 0;
++              if (priv->ptp_correction_off == offset1)
++                      return;
++
++              priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
++              priv->ptp_correction_off = offset1;
+-              if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
+-                                           &cfg))
+-                      WARN_ONCE(1, "Failed to set single step register");
+       }
+ }
+@@ -2112,6 +2178,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+       }
++      if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
++              dpaa2_ptp_onestep_reg_update_method(priv);
++
+       return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+                       -EFAULT : 0;
+ }
+@@ -4009,6 +4078,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
+               return err;
+       }
++      dpaa2_eth_detect_features(priv);
++
+       /* Capabilities listing */
+       supported |= IFF_LIVE_ADDR_CHANGE;
+@@ -4433,6 +4504,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+       dpaa2_eth_free_dpbp(priv);
+       dpaa2_eth_free_dpio(priv);
+       dpaa2_eth_free_dpni(priv);
++      if (priv->onestep_reg_base)
++              iounmap(priv->onestep_reg_base);
+       fsl_mc_portal_free(priv->mc_io);
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index e143d66ca2474..5934b1b4ee973 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -504,12 +504,15 @@ struct dpaa2_eth_priv {
+       u8 num_channels;
+       struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+       struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
+-
++      unsigned long features;
+       struct dpni_attr dpni_attrs;
+       u16 dpni_ver_major;
+       u16 dpni_ver_minor;
+       u16 tx_data_offset;
+-
++      void __iomem *onestep_reg_base;
++      u8 ptp_correction_off;
++      void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
++                                          u32 offset, u8 udp);
+       struct fsl_mc_device *dpbp_dev;
+       u16 rx_buf_size;
+       u16 bpid;
+@@ -647,6 +650,13 @@ enum dpaa2_eth_rx_dist {
+ #define DPAA2_ETH_DIST_L4DST          BIT(8)
+ #define DPAA2_ETH_DIST_ALL            (~0ULL)
++#define DPNI_PTP_ONESTEP_VER_MAJOR 8
++#define DPNI_PTP_ONESTEP_VER_MINOR 2
++#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
++#define DPAA2_PTP_SINGLE_STEP_ENABLE  BIT(31)
++#define DPAA2_PTP_SINGLE_STEP_CH      BIT(7)
++#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
++
+ #define DPNI_PAUSE_VER_MAJOR          7
+ #define DPNI_PAUSE_VER_MINOR          13
+ #define dpaa2_eth_has_pause_support(priv)                     \
+-- 
+2.39.5
+
diff --git a/queue-5.10/drm-v3d-disable-interrupts-before-resetting-the-gpu.patch b/queue-5.10/drm-v3d-disable-interrupts-before-resetting-the-gpu.patch
new file mode 100644 (file)
index 0000000..a57c455
--- /dev/null
@@ -0,0 +1,215 @@
+From d26c3834faddd3bcf1ccce713368a6bbc86bbe8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Jun 2025 19:42:42 -0300
+Subject: drm/v3d: Disable interrupts before resetting the GPU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maíra Canal <mcanal@igalia.com>
+
+[ Upstream commit 226862f50a7a88e4e4de9abbf36c64d19acd6fd0 ]
+
+Currently, an interrupt can be triggered during a GPU reset, which can
+lead to GPU hangs and NULL pointer dereference in an interrupt context
+as shown in the following trace:
+
+ [  314.035040] Unable to handle kernel NULL pointer dereference at virtual address 00000000000000c0
+ [  314.043822] Mem abort info:
+ [  314.046606]   ESR = 0x0000000096000005
+ [  314.050347]   EC = 0x25: DABT (current EL), IL = 32 bits
+ [  314.055651]   SET = 0, FnV = 0
+ [  314.058695]   EA = 0, S1PTW = 0
+ [  314.061826]   FSC = 0x05: level 1 translation fault
+ [  314.066694] Data abort info:
+ [  314.069564]   ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000
+ [  314.075039]   CM = 0, WnR = 0, TnD = 0, TagAccess = 0
+ [  314.080080]   GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
+ [  314.085382] user pgtable: 4k pages, 39-bit VAs, pgdp=0000000102728000
+ [  314.091814] [00000000000000c0] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000
+ [  314.100511] Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP
+ [  314.106770] Modules linked in: v3d i2c_brcmstb vc4 snd_soc_hdmi_codec gpu_sched drm_shmem_helper drm_display_helper cec drm_dma_helper drm_kms_helper drm drm_panel_orientation_quirks snd_soc_core snd_compress snd_pcm_dmaengine snd_pcm snd_timer snd backlight
+ [  314.129654] CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.12.25+rpt-rpi-v8 #1  Debian 1:6.12.25-1+rpt1
+ [  314.139388] Hardware name: Raspberry Pi 4 Model B Rev 1.4 (DT)
+ [  314.145211] pstate: 600000c5 (nZCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ [  314.152165] pc : v3d_irq+0xec/0x2e0 [v3d]
+ [  314.156187] lr : v3d_irq+0xe0/0x2e0 [v3d]
+ [  314.160198] sp : ffffffc080003ea0
+ [  314.163502] x29: ffffffc080003ea0 x28: ffffffec1f184980 x27: 021202b000000000
+ [  314.170633] x26: ffffffec1f17f630 x25: ffffff8101372000 x24: ffffffec1f17d9f0
+ [  314.177764] x23: 000000000000002a x22: 000000000000002a x21: ffffff8103252000
+ [  314.184895] x20: 0000000000000001 x19: 00000000deadbeef x18: 0000000000000000
+ [  314.192026] x17: ffffff94e51d2000 x16: ffffffec1dac3cb0 x15: c306000000000000
+ [  314.199156] x14: 0000000000000000 x13: b2fc982e03cc5168 x12: 0000000000000001
+ [  314.206286] x11: ffffff8103f8bcc0 x10: ffffffec1f196868 x9 : ffffffec1dac3874
+ [  314.213416] x8 : 0000000000000000 x7 : 0000000000042a3a x6 : ffffff810017a180
+ [  314.220547] x5 : ffffffec1ebad400 x4 : ffffffec1ebad320 x3 : 00000000000bebeb
+ [  314.227677] x2 : 0000000000000000 x1 : 0000000000000000 x0 : 0000000000000000
+ [  314.234807] Call trace:
+ [  314.237243]  v3d_irq+0xec/0x2e0 [v3d]
+ [  314.240906]  __handle_irq_event_percpu+0x58/0x218
+ [  314.245609]  handle_irq_event+0x54/0xb8
+ [  314.249439]  handle_fasteoi_irq+0xac/0x240
+ [  314.253527]  handle_irq_desc+0x48/0x68
+ [  314.257269]  generic_handle_domain_irq+0x24/0x38
+ [  314.261879]  gic_handle_irq+0x48/0xd8
+ [  314.265533]  call_on_irq_stack+0x24/0x58
+ [  314.269448]  do_interrupt_handler+0x88/0x98
+ [  314.273624]  el1_interrupt+0x34/0x68
+ [  314.277193]  el1h_64_irq_handler+0x18/0x28
+ [  314.281281]  el1h_64_irq+0x64/0x68
+ [  314.284673]  default_idle_call+0x3c/0x168
+ [  314.288675]  do_idle+0x1fc/0x230
+ [  314.291895]  cpu_startup_entry+0x3c/0x50
+ [  314.295810]  rest_init+0xe4/0xf0
+ [  314.299030]  start_kernel+0x5e8/0x790
+ [  314.302684]  __primary_switched+0x80/0x90
+ [  314.306691] Code: 940029eb 360ffc13 f9442ea0 52800001 (f9406017)
+ [  314.312775] ---[ end trace 0000000000000000 ]---
+ [  314.317384] Kernel panic - not syncing: Oops: Fatal exception in interrupt
+ [  314.324249] SMP: stopping secondary CPUs
+ [  314.328167] Kernel Offset: 0x2b9da00000 from 0xffffffc080000000
+ [  314.334076] PHYS_OFFSET: 0x0
+ [  314.336946] CPU features: 0x08,00002013,c0200000,0200421b
+ [  314.342337] Memory Limit: none
+ [  314.345382] ---[ end Kernel panic - not syncing: Oops: Fatal exception in interrupt ]---
+
+Before resetting the GPU, it's necessary to disable all interrupts and
+deal with any interrupt handler still in-flight. Otherwise, the GPU might
+reset with jobs still running, or yet, an interrupt could be handled
+during the reset.
+
+Cc: stable@vger.kernel.org
+Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")
+Reviewed-by: Juan A. Suarez <jasuarez@igalia.com>
+Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
+Link: https://lore.kernel.org/r/20250628224243.47599-1-mcanal@igalia.com
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/v3d/v3d_drv.h |  7 +++++++
+ drivers/gpu/drm/v3d/v3d_gem.c |  2 ++
+ drivers/gpu/drm/v3d/v3d_irq.c | 38 ++++++++++++++++++++++++++---------
+ 3 files changed, 37 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index 8a390738d65ba..a605b31a8224c 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -37,6 +37,12 @@ struct v3d_queue_state {
+       u64 emit_seqno;
+ };
++enum v3d_irq {
++      V3D_CORE_IRQ,
++      V3D_HUB_IRQ,
++      V3D_MAX_IRQS,
++};
++
+ struct v3d_dev {
+       struct drm_device drm;
+@@ -46,6 +52,7 @@ struct v3d_dev {
+       int ver;
+       bool single_irq_line;
++      int irq[V3D_MAX_IRQS];
+       void __iomem *hub_regs;
+       void __iomem *core_regs[3];
+       void __iomem *bridge_regs;
+diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
+index 64fe63c1938f5..32cc461937cf3 100644
+--- a/drivers/gpu/drm/v3d/v3d_gem.c
++++ b/drivers/gpu/drm/v3d/v3d_gem.c
+@@ -120,6 +120,8 @@ v3d_reset(struct v3d_dev *v3d)
+       if (false)
+               v3d_idle_axi(v3d, 0);
++      v3d_irq_disable(v3d);
++
+       v3d_idle_gca(v3d);
+       v3d_reset_v3d(v3d);
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index c678c4ce4f113..96766a788215f 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -218,7 +218,7 @@ v3d_hub_irq(int irq, void *arg)
+ int
+ v3d_irq_init(struct v3d_dev *v3d)
+ {
+-      int irq1, ret, core;
++      int irq, ret, core;
+       INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
+@@ -229,17 +229,24 @@ v3d_irq_init(struct v3d_dev *v3d)
+               V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
+       V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+-      irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
+-      if (irq1 == -EPROBE_DEFER)
+-              return irq1;
+-      if (irq1 > 0) {
+-              ret = devm_request_irq(v3d->drm.dev, irq1,
++      irq = platform_get_irq(v3d_to_pdev(v3d), 1);
++      if (irq == -EPROBE_DEFER)
++              return irq;
++      if (irq > 0) {
++              v3d->irq[V3D_CORE_IRQ] = irq;
++
++              ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
+                                      v3d_irq, IRQF_SHARED,
+                                      "v3d_core0", v3d);
+               if (ret)
+                       goto fail;
+-              ret = devm_request_irq(v3d->drm.dev,
+-                                     platform_get_irq(v3d_to_pdev(v3d), 0),
++
++              irq = platform_get_irq(v3d_to_pdev(v3d), 0);
++              if (irq < 0)
++                      return irq;
++              v3d->irq[V3D_HUB_IRQ] = irq;
++
++              ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ],
+                                      v3d_hub_irq, IRQF_SHARED,
+                                      "v3d_hub", v3d);
+               if (ret)
+@@ -247,8 +254,12 @@ v3d_irq_init(struct v3d_dev *v3d)
+       } else {
+               v3d->single_irq_line = true;
+-              ret = devm_request_irq(v3d->drm.dev,
+-                                     platform_get_irq(v3d_to_pdev(v3d), 0),
++              irq = platform_get_irq(v3d_to_pdev(v3d), 0);
++              if (irq < 0)
++                      return irq;
++              v3d->irq[V3D_CORE_IRQ] = irq;
++
++              ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
+                                      v3d_irq, IRQF_SHARED,
+                                      "v3d", v3d);
+               if (ret)
+@@ -283,12 +294,19 @@ void
+ v3d_irq_disable(struct v3d_dev *v3d)
+ {
+       int core;
++      int i;
+       /* Disable all interrupts. */
+       for (core = 0; core < v3d->cores; core++)
+               V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
+       V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
++      /* Finish any interrupt handler still in flight. */
++      for (i = 0; i < V3D_MAX_IRQS; i++) {
++              if (v3d->irq[i])
++                      synchronize_irq(v3d->irq[i]);
++      }
++
+       /* Clear any pending interrupts we might have left. */
+       for (core = 0; core < v3d->cores; core++)
+               V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
+-- 
+2.39.5
+
diff --git a/queue-5.10/flexfiles-pnfs-update-stats-on-nfs4err_delay-for-v4..patch b/queue-5.10/flexfiles-pnfs-update-stats-on-nfs4err_delay-for-v4..patch
new file mode 100644 (file)
index 0000000..e8d0548
--- /dev/null
@@ -0,0 +1,36 @@
+From 683c02924f935e0e74de8dc5f8dd258ad9978d0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 May 2025 21:04:15 +0200
+Subject: flexfiles/pNFS: update stats on NFS4ERR_DELAY for v4.1 DSes
+
+From: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+
+[ Upstream commit e3e3775392f3f0f3e3044f8c162bf47858e01759 ]
+
+On NFS4ERR_DELAY nfs slient updates its stats, but misses for
+flexfiles v4.1 DSes.
+
+Signed-off-by: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Stable-dep-of: 38074de35b01 ("NFSv4/flexfiles: Fix handling of NFS level errors in I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index ce9c2d1f54ae0..46b106785eb82 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1127,6 +1127,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+               nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+               break;
+       case -NFS4ERR_DELAY:
++              nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++              fallthrough;
+       case -NFS4ERR_GRACE:
+               rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+               break;
+-- 
+2.39.5
+
diff --git a/queue-5.10/mmc-mediatek-use-data-instead-of-mrq-parameter-from-.patch b/queue-5.10/mmc-mediatek-use-data-instead-of-mrq-parameter-from-.patch
new file mode 100644 (file)
index 0000000..5989dad
--- /dev/null
@@ -0,0 +1,99 @@
+From 85cbe826cdb1015ef64671585ee34c0e054065f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 May 2021 18:09:00 +0800
+Subject: mmc: mediatek: use data instead of mrq parameter from
+ msdc_{un}prepare_data()
+
+From: Yue Hu <huyue2@yulong.com>
+
+[ Upstream commit 151071351bb6f3d1861e99a22c4cebadf81911a0 ]
+
+We already have 'mrq->data' before calling these two functions, no
+need to find it again via 'mrq->data' internally. Also remove local
+data variable accordingly.
+
+Signed-off-by: Yue Hu <huyue2@yulong.com>
+Link: https://lore.kernel.org/r/20210517100900.1620-1-zbestahu@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: f5de469990f1 ("mtk-sd: Prevent memory corruption from DMA map failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/mtk-sd.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 3f82e0f9dc057..f6bb3b45b37ff 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -708,10 +708,8 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+       writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
+ }
+-static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
++static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
+ {
+-      struct mmc_data *data = mrq->data;
+-
+       if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
+               data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
+                                           mmc_get_dma_dir(data));
+@@ -720,10 +718,8 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
+       }
+ }
+-static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
++static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
+ {
+-      struct mmc_data *data = mrq->data;
+-
+       if (data->host_cookie & MSDC_ASYNC_FLAG)
+               return;
+@@ -1116,7 +1112,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
+       msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+       if (mrq->data)
+-              msdc_unprepare_data(host, mrq);
++              msdc_unprepare_data(host, mrq->data);
+       if (host->error)
+               msdc_reset_hw(host);
+       mmc_request_done(mmc_from_priv(host), mrq);
+@@ -1287,7 +1283,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+       host->mrq = mrq;
+       if (mrq->data)
+-              msdc_prepare_data(host, mrq);
++              msdc_prepare_data(host, mrq->data);
+       /* if SBC is required, we have HW option and SW option.
+        * if HW option is enabled, and SBC does not have "special" flags,
+@@ -1308,7 +1304,7 @@ static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
+       if (!data)
+               return;
+-      msdc_prepare_data(host, mrq);
++      msdc_prepare_data(host, data);
+       data->host_cookie |= MSDC_ASYNC_FLAG;
+ }
+@@ -1316,14 +1312,14 @@ static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+               int err)
+ {
+       struct msdc_host *host = mmc_priv(mmc);
+-      struct mmc_data *data;
++      struct mmc_data *data = mrq->data;
+-      data = mrq->data;
+       if (!data)
+               return;
++
+       if (data->host_cookie) {
+               data->host_cookie &= ~MSDC_ASYNC_FLAG;
+-              msdc_unprepare_data(host, mrq);
++              msdc_unprepare_data(host, data);
+       }
+ }
+-- 
+2.39.5
+
diff --git a/queue-5.10/mtk-sd-prevent-memory-corruption-from-dma-map-failur.patch b/queue-5.10/mtk-sd-prevent-memory-corruption-from-dma-map-failur.patch
new file mode 100644 (file)
index 0000000..2ccda62
--- /dev/null
@@ -0,0 +1,65 @@
+From f4585d4824cdd1da06895b579d2417130984809c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Jun 2025 20:26:10 +0900
+Subject: mtk-sd: Prevent memory corruption from DMA map failure
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+[ Upstream commit f5de469990f19569627ea0dd56536ff5a13beaa3 ]
+
+If msdc_prepare_data() fails to map the DMA region, the request is
+not prepared for data receiving, but msdc_start_data() proceeds
+the DMA with previous setting.
+Since this will lead a memory corruption, we have to stop the
+request operation soon after the msdc_prepare_data() fails to
+prepare it.
+
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Fixes: 208489032bdd ("mmc: mediatek: Add Mediatek MMC driver")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/174972756982.3337526.6755001617701603082.stgit@mhiramat.tok.corp.google.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/mtk-sd.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index f6bb3b45b37ff..2c998683e3e33 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -718,6 +718,11 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
+       }
+ }
++static bool msdc_data_prepared(struct mmc_data *data)
++{
++      return data->host_cookie & MSDC_PREPARE_FLAG;
++}
++
+ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
+ {
+       if (data->host_cookie & MSDC_ASYNC_FLAG)
+@@ -1282,8 +1287,18 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+       WARN_ON(host->mrq);
+       host->mrq = mrq;
+-      if (mrq->data)
++      if (mrq->data) {
+               msdc_prepare_data(host, mrq->data);
++              if (!msdc_data_prepared(mrq->data)) {
++                      /*
++                       * Failed to prepare DMA area, fail fast before
++                       * starting any commands.
++                       */
++                      mrq->cmd->error = -ENOSPC;
++                      mmc_request_done(mmc_from_priv(host), mrq);
++                      return;
++              }
++      }
+       /* if SBC is required, we have HW option and SW option.
+        * if HW option is enabled, and SBC does not have "special" flags,
+-- 
+2.39.5
+
diff --git a/queue-5.10/mtk-sd-reset-host-mrq-on-prepare_data-error.patch b/queue-5.10/mtk-sd-reset-host-mrq-on-prepare_data-error.patch
new file mode 100644 (file)
index 0000000..5324773
--- /dev/null
@@ -0,0 +1,38 @@
+From 786db4b314f15f7738e25eff4620835ba259f678 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jun 2025 14:20:37 +0900
+Subject: mtk-sd: reset host->mrq on prepare_data() error
+
+From: Sergey Senozhatsky <senozhatsky@chromium.org>
+
+[ Upstream commit ec54c0a20709ed6e56f40a8d59eee725c31a916b ]
+
+Do not leave host with dangling ->mrq pointer if we hit
+the msdc_prepare_data() error out path.
+
+Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Fixes: f5de469990f1 ("mtk-sd: Prevent memory corruption from DMA map failure")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250625052106.584905-1-senozhatsky@chromium.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/mtk-sd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 2c998683e3e33..8d0f888b219ac 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -1290,6 +1290,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+       if (mrq->data) {
+               msdc_prepare_data(host, mrq->data);
+               if (!msdc_data_prepared(mrq->data)) {
++                      host->mrq = NULL;
+                       /*
+                        * Failed to prepare DMA area, fail fast before
+                        * starting any commands.
+-- 
+2.39.5
+
diff --git a/queue-5.10/net-dpaa2-eth-rearrange-variable-in-dpaa2_eth_get_et.patch b/queue-5.10/net-dpaa2-eth-rearrange-variable-in-dpaa2_eth_get_et.patch
new file mode 100644 (file)
index 0000000..cda345e
--- /dev/null
@@ -0,0 +1,62 @@
+From d7c721370c032654aa75c21664adaed16882c7de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 17:18:51 +0300
+Subject: net: dpaa2-eth: rearrange variable in dpaa2_eth_get_ethtool_stats
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 3313206827678f6f036eca601a51f6c4524b559a ]
+
+Rearrange the variables in the dpaa2_eth_get_ethtool_stats() function so
+that we adhere to the reverse Christmas tree rule.
+Also, in the next patch we are adding more variables and I didn't know
+where to place them with the current ordering.
+
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 2def09ead4ad ("dpaa2-eth: fix xdp_rxq_info leak")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/freescale/dpaa2/dpaa2-ethtool.c   | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+index f981a523e13a4..d7de60049700f 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+@@ -225,17 +225,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+                                       struct ethtool_stats *stats,
+                                       u64 *data)
+ {
+-      int i = 0;
+-      int j, k, err;
+-      int num_cnt;
+-      union dpni_statistics dpni_stats;
+-      u32 fcnt, bcnt;
+-      u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+-      u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+-      u32 buf_cnt;
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+-      struct dpaa2_eth_drv_stats *extras;
+-      struct dpaa2_eth_ch_stats *ch_stats;
++      union dpni_statistics dpni_stats;
+       int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+               sizeof(dpni_stats.page_0),
+               sizeof(dpni_stats.page_1),
+@@ -245,6 +236,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+               sizeof(dpni_stats.page_5),
+               sizeof(dpni_stats.page_6),
+       };
++      u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++      u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++      struct dpaa2_eth_ch_stats *ch_stats;
++      struct dpaa2_eth_drv_stats *extras;
++      int j, k, err, num_cnt, i = 0;
++      u32 fcnt, bcnt;
++      u32 buf_cnt;
+       memset(data, 0,
+              sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+-- 
+2.39.5
+
diff --git a/queue-5.10/nfsv4-flexfiles-fix-handling-of-nfs-level-errors-in-.patch b/queue-5.10/nfsv4-flexfiles-fix-handling-of-nfs-level-errors-in-.patch
new file mode 100644 (file)
index 0000000..a2c1e24
--- /dev/null
@@ -0,0 +1,250 @@
+From 8654d3a693f92e5ccd3cfad5269b8080bde508d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jun 2025 15:16:11 -0400
+Subject: NFSv4/flexfiles: Fix handling of NFS level errors in I/O
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 38074de35b015df5623f524d6f2b49a0cd395c40 ]
+
+Allow the flexfiles error handling to recognise NFS level errors (as
+opposed to RPC level errors) and handle them separately. The main
+motivator is the NFSERR_PERM errors that get returned if the NFS client
+connects to the data server through a port number that is lower than
+1024. In that case, the client should disconnect and retry a READ on a
+different data server, or it should retry a WRITE after reconnecting.
+
+Reviewed-by: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c | 119 ++++++++++++++++++-------
+ 1 file changed, 85 insertions(+), 34 deletions(-)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 46b106785eb82..f8962eaec87bc 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1103,6 +1103,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
+ }
+ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
++                                         u32 op_status,
+                                          struct nfs4_state *state,
+                                          struct nfs_client *clp,
+                                          struct pnfs_layout_segment *lseg,
+@@ -1113,34 +1114,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+       struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+       struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
+-      switch (task->tk_status) {
+-      case -NFS4ERR_BADSESSION:
+-      case -NFS4ERR_BADSLOT:
+-      case -NFS4ERR_BAD_HIGH_SLOT:
+-      case -NFS4ERR_DEADSESSION:
+-      case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+-      case -NFS4ERR_SEQ_FALSE_RETRY:
+-      case -NFS4ERR_SEQ_MISORDERED:
++      switch (op_status) {
++      case NFS4_OK:
++      case NFS4ERR_NXIO:
++              break;
++      case NFSERR_PERM:
++              if (!task->tk_xprt)
++                      break;
++              xprt_force_disconnect(task->tk_xprt);
++              goto out_retry;
++      case NFS4ERR_BADSESSION:
++      case NFS4ERR_BADSLOT:
++      case NFS4ERR_BAD_HIGH_SLOT:
++      case NFS4ERR_DEADSESSION:
++      case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++      case NFS4ERR_SEQ_FALSE_RETRY:
++      case NFS4ERR_SEQ_MISORDERED:
+               dprintk("%s ERROR %d, Reset session. Exchangeid "
+                       "flags 0x%x\n", __func__, task->tk_status,
+                       clp->cl_exchange_flags);
+               nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+-              break;
+-      case -NFS4ERR_DELAY:
++              goto out_retry;
++      case NFS4ERR_DELAY:
+               nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+               fallthrough;
+-      case -NFS4ERR_GRACE:
++      case NFS4ERR_GRACE:
+               rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+-              break;
+-      case -NFS4ERR_RETRY_UNCACHED_REP:
+-              break;
++              goto out_retry;
++      case NFS4ERR_RETRY_UNCACHED_REP:
++              goto out_retry;
+       /* Invalidate Layout errors */
+-      case -NFS4ERR_PNFS_NO_LAYOUT:
+-      case -ESTALE:           /* mapped NFS4ERR_STALE */
+-      case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
+-      case -EISDIR:           /* mapped NFS4ERR_ISDIR */
+-      case -NFS4ERR_FHEXPIRED:
+-      case -NFS4ERR_WRONG_TYPE:
++      case NFS4ERR_PNFS_NO_LAYOUT:
++      case NFS4ERR_STALE:
++      case NFS4ERR_BADHANDLE:
++      case NFS4ERR_ISDIR:
++      case NFS4ERR_FHEXPIRED:
++      case NFS4ERR_WRONG_TYPE:
+               dprintk("%s Invalid layout error %d\n", __func__,
+                       task->tk_status);
+               /*
+@@ -1153,6 +1162,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+               pnfs_destroy_layout(NFS_I(inode));
+               rpc_wake_up(&tbl->slot_tbl_waitq);
+               goto reset;
++      default:
++              break;
++      }
++
++      switch (task->tk_status) {
+       /* RPC connection errors */
+       case -ECONNREFUSED:
+       case -EHOSTDOWN:
+@@ -1166,26 +1180,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
+               rpc_wake_up(&tbl->slot_tbl_waitq);
+-              fallthrough;
++              break;
+       default:
+-              if (ff_layout_avoid_mds_available_ds(lseg))
+-                      return -NFS4ERR_RESET_TO_PNFS;
+-reset:
+-              dprintk("%s Retry through MDS. Error %d\n", __func__,
+-                      task->tk_status);
+-              return -NFS4ERR_RESET_TO_MDS;
++              break;
+       }
++
++      if (ff_layout_avoid_mds_available_ds(lseg))
++              return -NFS4ERR_RESET_TO_PNFS;
++reset:
++      dprintk("%s Retry through MDS. Error %d\n", __func__,
++              task->tk_status);
++      return -NFS4ERR_RESET_TO_MDS;
++
++out_retry:
+       task->tk_status = 0;
+       return -EAGAIN;
+ }
+ /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
+ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
++                                         u32 op_status,
++                                         struct nfs_client *clp,
+                                          struct pnfs_layout_segment *lseg,
+                                          u32 idx)
+ {
+       struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
++      switch (op_status) {
++      case NFS_OK:
++      case NFSERR_NXIO:
++              break;
++      case NFSERR_PERM:
++              if (!task->tk_xprt)
++                      break;
++              xprt_force_disconnect(task->tk_xprt);
++              goto out_retry;
++      case NFSERR_ACCES:
++      case NFSERR_BADHANDLE:
++      case NFSERR_FBIG:
++      case NFSERR_IO:
++      case NFSERR_NOSPC:
++      case NFSERR_ROFS:
++      case NFSERR_STALE:
++              goto out_reset_to_pnfs;
++      case NFSERR_JUKEBOX:
++              nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++              goto out_retry;
++      default:
++              break;
++      }
++
+       switch (task->tk_status) {
+       /* File access problems. Don't mark the device as unavailable */
+       case -EACCES:
+@@ -1204,6 +1248,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
+       }
++out_reset_to_pnfs:
+       /* FIXME: Need to prevent infinite looping here. */
+       return -NFS4ERR_RESET_TO_PNFS;
+ out_retry:
+@@ -1214,6 +1259,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ }
+ static int ff_layout_async_handle_error(struct rpc_task *task,
++                                      u32 op_status,
+                                       struct nfs4_state *state,
+                                       struct nfs_client *clp,
+                                       struct pnfs_layout_segment *lseg,
+@@ -1232,10 +1278,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
+       switch (vers) {
+       case 3:
+-              return ff_layout_async_handle_error_v3(task, lseg, idx);
+-      case 4:
+-              return ff_layout_async_handle_error_v4(task, state, clp,
++              return ff_layout_async_handle_error_v3(task, op_status, clp,
+                                                      lseg, idx);
++      case 4:
++              return ff_layout_async_handle_error_v4(task, op_status, state,
++                                                     clp, lseg, idx);
+       default:
+               /* should never happen */
+               WARN_ON_ONCE(1);
+@@ -1286,6 +1333,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+       switch (status) {
+       case NFS4ERR_DELAY:
+       case NFS4ERR_GRACE:
++      case NFS4ERR_PERM:
+               break;
+       case NFS4ERR_NXIO:
+               ff_layout_mark_ds_unreachable(lseg, idx);
+@@ -1318,7 +1366,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
+               trace_ff_layout_read_error(hdr);
+       }
+-      err = ff_layout_async_handle_error(task, hdr->args.context->state,
++      err = ff_layout_async_handle_error(task, hdr->res.op_status,
++                                         hdr->args.context->state,
+                                          hdr->ds_clp, hdr->lseg,
+                                          hdr->pgio_mirror_idx);
+@@ -1483,7 +1532,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
+               trace_ff_layout_write_error(hdr);
+       }
+-      err = ff_layout_async_handle_error(task, hdr->args.context->state,
++      err = ff_layout_async_handle_error(task, hdr->res.op_status,
++                                         hdr->args.context->state,
+                                          hdr->ds_clp, hdr->lseg,
+                                          hdr->pgio_mirror_idx);
+@@ -1529,8 +1579,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
+               trace_ff_layout_commit_error(data);
+       }
+-      err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
+-                                         data->lseg, data->ds_commit_index);
++      err = ff_layout_async_handle_error(task, data->res.op_status,
++                                         NULL, data->ds_clp, data->lseg,
++                                         data->ds_commit_index);
+       trace_nfs4_pnfs_commit_ds(data, err);
+       switch (err) {
+-- 
+2.39.5
+
diff --git a/queue-5.10/rdma-mlx5-fix-vport-loopback-for-mpv-device.patch b/queue-5.10/rdma-mlx5-fix-vport-loopback-for-mpv-device.patch
new file mode 100644 (file)
index 0000000..c2cad07
--- /dev/null
@@ -0,0 +1,87 @@
+From 90275f7225db55b5e8dbe2a0d673d4259eb102d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jun 2025 12:14:54 +0300
+Subject: RDMA/mlx5: Fix vport loopback for MPV device
+
+From: Patrisious Haddad <phaddad@nvidia.com>
+
+[ Upstream commit a9a9e68954f29b1e197663f76289db4879fd51bb ]
+
+Always enable vport loopback for both MPV devices on driver start.
+
+Previously in some cases related to MPV RoCE, packets weren't correctly
+executing loopback check at vport in FW, since it was disabled.
+Due to complexity of identifying such cases for MPV always enable vport
+loopback for both GVMIs when binding the slave to the master port.
+
+Fixes: 0042f9e458a5 ("RDMA/mlx5: Enable vport loopback when user context or QP mandate")
+Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/d4298f5ebb2197459e9e7221c51ecd6a34699847.1750064969.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 33 +++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 1800cea46b2d3..0e20b99cae8b6 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1667,6 +1667,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
+                       mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ }
++static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
++                              struct mlx5_core_dev *slave)
++{
++      int err;
++
++      err = mlx5_nic_vport_update_local_lb(master, true);
++      if (err)
++              return err;
++
++      err = mlx5_nic_vport_update_local_lb(slave, true);
++      if (err)
++              goto out;
++
++      return 0;
++
++out:
++      mlx5_nic_vport_update_local_lb(master, false);
++      return err;
++}
++
++static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
++                                struct mlx5_core_dev *slave)
++{
++      mlx5_nic_vport_update_local_lb(slave, false);
++      mlx5_nic_vport_update_local_lb(master, false);
++}
++
+ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+ {
+       int err = 0;
+@@ -3424,6 +3451,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+       lockdep_assert_held(&mlx5_ib_multiport_mutex);
++      mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
++
+       mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
+       spin_lock(&port->mp.mpi_lock);
+@@ -3512,6 +3541,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+       mlx5_ib_init_cong_debugfs(ibdev, port_num);
++      err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
++      if (err)
++              goto unbind;
++
+       return true;
+ unbind:
+-- 
+2.39.5
+
diff --git a/queue-5.10/regulator-gpio-fix-the-out-of-bounds-access-to-drvda.patch b/queue-5.10/regulator-gpio-fix-the-out-of-bounds-access-to-drvda.patch
new file mode 100644 (file)
index 0000000..68e658e
--- /dev/null
@@ -0,0 +1,46 @@
+From 9c051a9124b934d96fc36874519eb030569c428c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Jul 2025 16:05:49 +0530
+Subject: regulator: gpio: Fix the out-of-bounds access to drvdata::gpiods
+
+From: Manivannan Sadhasivam <mani@kernel.org>
+
+[ Upstream commit c9764fd88bc744592b0604ccb6b6fc1a5f76b4e3 ]
+
+drvdata::gpiods is supposed to hold an array of 'gpio_desc' pointers. But
+the memory is allocated for only one pointer. This will lead to
+out-of-bounds access later in the code if 'config::ngpios' is > 1. So
+fix the code to allocate enough memory to hold 'config::ngpios' of GPIO
+descriptors.
+
+While at it, also move the check for memory allocation failure to be below
+the allocation to make it more readable.
+
+Cc: stable@vger.kernel.org # 5.0
+Fixes: d6cd33ad7102 ("regulator: gpio: Convert to use descriptors")
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/20250703103549.16558-1-mani@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/gpio-regulator.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
+index 5927d4f3eabd7..de07b16b34f8e 100644
+--- a/drivers/regulator/gpio-regulator.c
++++ b/drivers/regulator/gpio-regulator.c
+@@ -257,8 +257,8 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+               return -ENOMEM;
+       }
+-      drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *),
+-                                     GFP_KERNEL);
++      drvdata->gpiods = devm_kcalloc(dev, config->ngpios,
++                                     sizeof(struct gpio_desc *), GFP_KERNEL);
+       if (!drvdata->gpiods)
+               return -ENOMEM;
+       for (i = 0; i < config->ngpios; i++) {
+-- 
+2.39.5
+
index f0a1eb967f878a30d6cbf02c51129c82e613cadb..200247360e928cbef10b7d2010cc7c642e45287d 100644 (file)
@@ -115,3 +115,21 @@ wifi-mac80211-drop-invalid-source-address-ocb-frames.patch
 wifi-ath6kl-remove-warn-on-bad-firmware-input.patch
 acpica-refuse-to-evaluate-a-method-if-arguments-are-.patch
 rcu-return-early-if-callback-is-not-specified.patch
+virtio_ring-introduce-dma-map-api-for-virtqueue.patch
+virtio_ring-introduce-dma-sync-api-for-virtqueue.patch
+virtio-net-ensure-the-received-length-does-not-excee.patch
+regulator-gpio-fix-the-out-of-bounds-access-to-drvda.patch
+mmc-mediatek-use-data-instead-of-mrq-parameter-from-.patch
+mtk-sd-prevent-memory-corruption-from-dma-map-failur.patch
+mtk-sd-reset-host-mrq-on-prepare_data-error.patch
+drm-v3d-disable-interrupts-before-resetting-the-gpu.patch
+rdma-mlx5-fix-vport-loopback-for-mpv-device.patch
+flexfiles-pnfs-update-stats-on-nfs4err_delay-for-v4..patch
+nfsv4-flexfiles-fix-handling-of-nfs-level-errors-in-.patch
+btrfs-propagate-last_unlink_trans-earlier-when-doing.patch
+btrfs-use-btrfs_record_snapshot_destroy-during-rmdir.patch
+dpaa2-eth-rename-dpaa2_eth_xdp_release_buf-into-dpaa.patch
+dpaa2-eth-update-dpni_get_single_step_cfg-command.patch
+dpaa2-eth-update-single_step-register-access.patch
+net-dpaa2-eth-rearrange-variable-in-dpaa2_eth_get_et.patch
+dpaa2-eth-fix-xdp_rxq_info-leak.patch
diff --git a/queue-5.10/virtio-net-ensure-the-received-length-does-not-excee.patch b/queue-5.10/virtio-net-ensure-the-received-length-does-not-excee.patch
new file mode 100644 (file)
index 0000000..eff9e9c
--- /dev/null
@@ -0,0 +1,134 @@
+From bb037700dfb1d8734a583f76d2550eafbcc75a1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Jul 2025 10:06:07 -0400
+Subject: virtio-net: ensure the received length does not exceed allocated size
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+[ Upstream commit 315dbdd7cdf6aa533829774caaf4d25f1fd20e73 ]
+
+In xdp_linearize_page, when reading the following buffers from the ring,
+we forget to check the received length with the true allocate size. This
+can lead to an out-of-bound read. This commit adds that missing check.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 4941d472bf95 ("virtio-net: do not reset during XDP set")
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Link: https://patch.msgid.link/20250630144212.48471-2-minhquangbui99@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 44 +++++++++++++++++++++++++++++++++-------
+ 1 file changed, 37 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 99dea89b26788..3de39df3462c7 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -394,6 +394,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
+       return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
+ }
++static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
++                             unsigned int len)
++{
++      unsigned int headroom, tailroom, room, truesize;
++
++      truesize = mergeable_ctx_to_truesize(mrg_ctx);
++      headroom = mergeable_ctx_to_headroom(mrg_ctx);
++      tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
++      room = SKB_DATA_ALIGN(headroom + tailroom);
++
++      if (len > truesize - room) {
++              pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
++                       dev->name, len, (unsigned long)(truesize - room));
++              DEV_STATS_INC(dev, rx_length_errors);
++              return -1;
++      }
++
++      return 0;
++}
++
+ /* Called from bottom half context */
+ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+                                  struct receive_queue *rq,
+@@ -639,8 +659,9 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
+  * across multiple buffers (num_buf > 1), and we make sure buffers
+  * have enough headroom.
+  */
+-static struct page *xdp_linearize_page(struct receive_queue *rq,
+-                                     u16 *num_buf,
++static struct page *xdp_linearize_page(struct net_device *dev,
++                                     struct receive_queue *rq,
++                                     int *num_buf,
+                                      struct page *p,
+                                      int offset,
+                                      int page_off,
+@@ -659,18 +680,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+       memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+       page_off += *len;
++      /* Only mergeable mode can go inside this while loop. In small mode,
++       * *num_buf == 1, so it cannot go inside.
++       */
+       while (--*num_buf) {
+               unsigned int buflen;
+               void *buf;
++              void *ctx;
+               int off;
+-              buf = virtqueue_get_buf(rq->vq, &buflen);
++              buf = virtqueue_get_buf_ctx(rq->vq, &buflen, &ctx);
+               if (unlikely(!buf))
+                       goto err_buf;
+               p = virt_to_head_page(buf);
+               off = buf - page_address(p);
++              if (check_mergeable_len(dev, ctx, buflen)) {
++                      put_page(p);
++                      goto err_buf;
++              }
++
+               /* guard against a misconfigured or uncooperative backend that
+                * is sending packet larger than the MTU.
+                */
+@@ -738,14 +768,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
+               if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
+                       int offset = buf - page_address(page) + header_offset;
+                       unsigned int tlen = len + vi->hdr_len;
+-                      u16 num_buf = 1;
++                      int num_buf = 1;
+                       xdp_headroom = virtnet_get_headroom(vi);
+                       header_offset = VIRTNET_RX_PAD + xdp_headroom;
+                       headroom = vi->hdr_len + header_offset;
+                       buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-                      xdp_page = xdp_linearize_page(rq, &num_buf, page,
++                      xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
+                                                     offset, header_offset,
+                                                     &tlen);
+                       if (!xdp_page)
+@@ -866,7 +896,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+                                        struct virtnet_rq_stats *stats)
+ {
+       struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+-      u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
++      int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+       struct page *page = virt_to_head_page(buf);
+       int offset = buf - page_address(page);
+       struct sk_buff *head_skb, *curr_skb;
+@@ -916,7 +946,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+               if (unlikely(num_buf > 1 ||
+                            headroom < virtnet_get_headroom(vi))) {
+                       /* linearize data for XDP */
+-                      xdp_page = xdp_linearize_page(rq, &num_buf,
++                      xdp_page = xdp_linearize_page(vi->dev, rq, &num_buf,
+                                                     page, offset,
+                                                     VIRTIO_XDP_HEADROOM,
+                                                     &len);
+-- 
+2.39.5
+
diff --git a/queue-5.10/virtio_ring-introduce-dma-map-api-for-virtqueue.patch b/queue-5.10/virtio_ring-introduce-dma-map-api-for-virtqueue.patch
new file mode 100644 (file)
index 0000000..d911563
--- /dev/null
@@ -0,0 +1,128 @@
+From fc185d9d91feecf009348f76e2ed624bb53b4f2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 20:30:55 +0800
+Subject: virtio_ring: introduce dma map api for virtqueue
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+[ Upstream commit b6253b4e21939f1bb54e8fdb84c23af9c3fb834a ]
+
+Added virtqueue_dma_map_api* to map DMA addresses for virtual memory in
+advance. The purpose is to keep memory mapped across multiple add/get
+buf operations.
+
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Message-Id: <20230810123057.43407-11-xuanzhuo@linux.alibaba.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Stable-dep-of: 315dbdd7cdf6 ("virtio-net: ensure the received length does not exceed allocated size")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_ring.c | 69 ++++++++++++++++++++++++++++++++++++
+ include/linux/virtio.h       |  8 +++++
+ 2 files changed, 77 insertions(+)
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index cf0e8e1893ee6..4bd5af5fd819d 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2349,4 +2349,73 @@ const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_get_vring);
++/**
++ * virtqueue_dma_map_single_attrs - map DMA for _vq
++ * @_vq: the struct virtqueue we're talking about.
++ * @ptr: the pointer of the buffer to do dma
++ * @size: the size of the buffer to do dma
++ * @dir: DMA direction
++ * @attrs: DMA Attrs
++ *
++ * The caller calls this to do dma mapping in advance. The DMA address can be
++ * passed to this _vq when it is in pre-mapped mode.
++ *
++ * return DMA address. Caller should check that by virtqueue_dma_mapping_error().
++ */
++dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
++                                        size_t size,
++                                        enum dma_data_direction dir,
++                                        unsigned long attrs)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++
++      if (!vq->use_dma_api)
++              return (dma_addr_t)virt_to_phys(ptr);
++
++      return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
++
++/**
++ * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
++ * @_vq: the struct virtqueue we're talking about.
++ * @addr: the dma address to unmap
++ * @size: the size of the buffer
++ * @dir: DMA direction
++ * @attrs: DMA Attrs
++ *
++ * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
++ *
++ */
++void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
++                                    size_t size, enum dma_data_direction dir,
++                                    unsigned long attrs)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++
++      if (!vq->use_dma_api)
++              return;
++
++      dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
++
++/**
++ * virtqueue_dma_mapping_error - check dma address
++ * @_vq: the struct virtqueue we're talking about.
++ * @addr: DMA address
++ *
++ * Returns 0 means dma valid. Other means invalid dma address.
++ */
++int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++
++      if (!vq->use_dma_api)
++              return 0;
++
++      return dma_mapping_error(vring_dma_dev(vq), addr);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
++
+ MODULE_LICENSE("GPL");
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 90c5ad5568097..0ad13391f7c6b 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -9,6 +9,7 @@
+ #include <linux/device.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/gfp.h>
++#include <linux/dma-mapping.h>
+ /**
+  * virtqueue - a queue to register buffers for sending or receiving.
+@@ -196,4 +197,11 @@ void unregister_virtio_driver(struct virtio_driver *drv);
+ #define module_virtio_driver(__virtio_driver) \
+       module_driver(__virtio_driver, register_virtio_driver, \
+                       unregister_virtio_driver)
++
++dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
++                                        enum dma_data_direction dir, unsigned long attrs);
++void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
++                                    size_t size, enum dma_data_direction dir,
++                                    unsigned long attrs);
++int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+ #endif /* _LINUX_VIRTIO_H */
+-- 
+2.39.5
+
diff --git a/queue-5.10/virtio_ring-introduce-dma-sync-api-for-virtqueue.patch b/queue-5.10/virtio_ring-introduce-dma-sync-api-for-virtqueue.patch
new file mode 100644 (file)
index 0000000..51380de
--- /dev/null
@@ -0,0 +1,133 @@
+From 7505756a61ead8f60e6e0096f013cfaa21fe8f55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 20:30:56 +0800
+Subject: virtio_ring: introduce dma sync api for virtqueue
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+[ Upstream commit 8bd2f71054bd0bc997833e9825143672eb7e2801 ]
+
+These API has been introduced:
+
+* virtqueue_dma_need_sync
+* virtqueue_dma_sync_single_range_for_cpu
+* virtqueue_dma_sync_single_range_for_device
+
+These APIs can be used together with the premapped mechanism to sync the
+DMA address.
+
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Stable-dep-of: 315dbdd7cdf6 ("virtio-net: ensure the received length does not exceed allocated size")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_ring.c | 76 ++++++++++++++++++++++++++++++++++++
+ include/linux/virtio.h       |  8 ++++
+ 2 files changed, 84 insertions(+)
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 4bd5af5fd819d..7e5b30ea8c8e2 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2418,4 +2418,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
++/**
++ * virtqueue_dma_need_sync - check a dma address needs sync
++ * @_vq: the struct virtqueue we're talking about.
++ * @addr: DMA address
++ *
++ * Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be
++ * synchronized
++ *
++ * return bool
++ */
++bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++
++      if (!vq->use_dma_api)
++              return false;
++
++      return dma_need_sync(vring_dma_dev(vq), addr);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
++
++/**
++ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
++ * @_vq: the struct virtqueue we're talking about.
++ * @addr: DMA address
++ * @offset: DMA address offset
++ * @size: buf size for sync
++ * @dir: DMA direction
++ *
++ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
++ * the DMA address really needs to be synchronized
++ *
++ */
++void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
++                                           dma_addr_t addr,
++                                           unsigned long offset, size_t size,
++                                           enum dma_data_direction dir)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++      struct device *dev = vring_dma_dev(vq);
++
++      if (!vq->use_dma_api)
++              return;
++
++      dma_sync_single_range_for_cpu(dev, addr, offset, size,
++                                    DMA_BIDIRECTIONAL);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
++
++/**
++ * virtqueue_dma_sync_single_range_for_device - dma sync for device
++ * @_vq: the struct virtqueue we're talking about.
++ * @addr: DMA address
++ * @offset: DMA address offset
++ * @size: buf size for sync
++ * @dir: DMA direction
++ *
++ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
++ * the DMA address really needs to be synchronized
++ */
++void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
++                                              dma_addr_t addr,
++                                              unsigned long offset, size_t size,
++                                              enum dma_data_direction dir)
++{
++      struct vring_virtqueue *vq = to_vvq(_vq);
++      struct device *dev = vring_dma_dev(vq);
++
++      if (!vq->use_dma_api)
++              return;
++
++      dma_sync_single_range_for_device(dev, addr, offset, size,
++                                       DMA_BIDIRECTIONAL);
++}
++EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
++
+ MODULE_LICENSE("GPL");
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 0ad13391f7c6b..5d3b16dc3913c 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -204,4 +204,12 @@ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+                                     size_t size, enum dma_data_direction dir,
+                                     unsigned long attrs);
+ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
++
++bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
++void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
++                                           unsigned long offset, size_t size,
++                                           enum dma_data_direction dir);
++void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
++                                              unsigned long offset, size_t size,
++                                              enum dma_data_direction dir);
+ #endif /* _LINUX_VIRTIO_H */
+-- 
+2.39.5
+