git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: add helper to pre-check if PP for an Rx queue will be unreadable
Author: Jakub Kicinski <kuba@kernel.org>
Mon, 1 Sep 2025 21:12:10 +0000 (14:12 -0700)
Committer: Paolo Abeni <pabeni@redhat.com>
Thu, 4 Sep 2025 08:19:17 +0000 (10:19 +0200)
mlx5 pokes into the rxq state to check if the queue has a memory
provider, and therefore whether it may produce unreadable mem.
Add a helper for doing this in the page pool API. fbnic will want
a similar thing (though, for a slightly different reason).

Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250901211214.1027927-11-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
include/net/netdev_queues.h
include/net/page_pool/helpers.h
net/core/netdev_rx_queue.c

index 3970d0ddbcdc908bda322a7c73bb52fdb062f0ba..714cce5956923919f074ecba99001e656c3ad8fe 100644 (file)
@@ -780,13 +780,6 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
        bitmap_free(rq->mpwqe.shampo->bitmap);
 }
 
-static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
-{
-       struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
-
-       return !!rxq->mp_params.mp_ops;
-}
-
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params,
                                struct mlx5e_rq_param *rqp,
@@ -825,7 +818,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
        hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
                MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
 
-       if (mlx5_rq_needs_separate_hd_pool(rq)) {
+       if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
                /* Separate page pool for shampo headers */
                struct page_pool_params pp_params = { };
 
index b9d02bc65c97f0fc766ab0bfb881c86cd5ca09a0..cd00e0406cf46f72f9a732371c232e073bd40c1d 100644 (file)
@@ -151,6 +151,8 @@ struct netdev_queue_mgmt_ops {
                                                         int idx);
 };
 
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+
 /**
  * DOC: Lockless queue stopping / waking helpers.
  *
index aa3719f28216a4e330dde524786d678b06d6ce3a..3247026e096abe5d909e69ee3da1254de4250a98 100644 (file)
@@ -505,6 +505,18 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
                page_pool_update_nid(pool, new_nid);
 }
 
+/**
+ * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
+ * @pool: queried page pool
+ *
+ * Check if page pool will return buffers which are unreadable to the CPU /
+ * kernel. This will only be the case if user space bound a memory provider (mp)
+ * which returns unreadable memory to the queue served by the page pool.
+ * If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound
+ * this helper will return false. See also netif_rxq_has_unreadable_mp().
+ *
+ * Return: true if memory allocated by the page pool may be unreadable
+ */
 static inline bool page_pool_is_unreadable(struct page_pool *pool)
 {
        return !!pool->mp_ops;
index 3bf1151d806107eb12cb7b3a00951b37e54910fe..c7d9341b76307bddbdf0c7e10ef3ff5fe074b346 100644 (file)
@@ -9,6 +9,15 @@
 
 #include "page_pool_priv.h"
 
+/* See also page_pool_is_unreadable() */
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
+{
+       struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
+
+       return !!rxq->mp_params.mp_ops;
+}
+EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
+
 int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 {
        struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);