idpf: add XSk pool initialization
author     Michal Kubiak <michal.kubiak@intel.com>
           Thu, 11 Sep 2025 16:22:30 +0000 (18:22 +0200)
committer  Tony Nguyen <anthony.l.nguyen@intel.com>
           Wed, 24 Sep 2025 17:34:35 +0000 (10:34 -0700)
Add functionality to set up an XSk buffer pool, including the ability to
stop, reconfigure and start only the selected queues, not the whole device.
Pool DMA mapping is managed by libeth_xdp.

Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/Makefile
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
drivers/net/ethernet/intel/idpf/xdp.c
drivers/net/ethernet/intel/idpf/xdp.h
drivers/net/ethernet/intel/idpf/xsk.c [new file with mode: 0644]
drivers/net/ethernet/intel/idpf/xsk.h [new file with mode: 0644]
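
For context (not part of this commit): the path added here is exercised when
userspace binds an AF_XDP socket to a specific queue. A minimal userspace
sketch using libxdp — the interface name "ens1f0" and queue 0 are
illustrative — where creating the socket makes the core issue an
XDP_SETUP_XSK_POOL request against the driver, which now lands in
idpf_xsk_pool_setup():

    /* AF_XDP userspace sketch (libxdp); illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #include <xdp/xsk.h>

    #define NUM_FRAMES      4096
    #define FRAME_SIZE      XSK_UMEM__DEFAULT_FRAME_SIZE

    int main(void)
    {
            struct xsk_ring_prod fill, tx;
            struct xsk_ring_cons comp, rx;
            struct xsk_socket *xsk;
            struct xsk_umem *umem;
            void *bufs;

            if (posix_memalign(&bufs, getpagesize(),
                               NUM_FRAMES * FRAME_SIZE))
                    return 1;

            /* Registering the UMEM creates the xsk_buff_pool. */
            if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
                                 &fill, &comp, NULL))
                    return 1;

            /* Binding to (ifname, queue) hands the pool to the driver. */
            if (xsk_socket__create(&xsk, "ens1f0", 0, umem, &rx, &tx, NULL))
                    return 1;

            puts("XSk pool installed on queue 0");
            xsk_socket__delete(xsk);
            xsk_umem__delete(umem);
            free(bufs);
            return 0;
    }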

index 0840c3bef37174bd26ce05e0fca55ff11f3597fa..651ddee942bd8c46daa739968bba30e6830946f0 100644 (file)
@@ -23,3 +23,4 @@ idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
 idpf-$(CONFIG_PTP_1588_CLOCK)  += idpf_virtchnl_ptp.o
 
 idpf-y                         += xdp.o
+idpf-y                         += xsk.o
index 786d0bacdd3c6aef78014d0eb3004c830ca26679..a5a1eec9ade8bd110507ccf6c0bfe94563be51b8 100644 (file)
@@ -1245,8 +1245,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
  *
  * returns pointer to rx vector
  */
-static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
-                                              int q_num)
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+                                       u32 q_num)
 {
        int q_grp, q_idx;
 
@@ -1266,8 +1266,8 @@ static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
  *
  * returns pointer to tx vector
  */
-static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
-                                              int q_num)
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+                                       u32 q_num)
 {
        int q_grp;
 
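The two lookup helpers above lose their static qualifier (and switch the
queue index to u32) so that the queue-pair switching code added to
idpf_txrx.c below can reuse them. A caller-side sketch of the check they
enable — an XSk queue pair is only usable when its RX and TX queues are
serviced by the same vector (this mirrors idpf_qp_switch() below):

    struct idpf_q_vector *rxv = idpf_find_rxq_vec(vport, qid);

    /* Reject a pair whose RX and TX queues sit on different vectors. */
    if (idpf_find_txq_vec(vport, qid) != rxv)
            return -EINVAL;
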
index 81b6646dd3fc58dba9f2a63a652f763e1c8f06cd..542e09a83bc0cab082f7172a34642f3e3600307c 100644 (file)
@@ -922,6 +922,305 @@ err_out:
        return err;
 }
 
+static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+{
+       const struct idpf_vport *vport = qs->vport;
+       bool splitq;
+       int err;
+
+       splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+       for (u32 i = 0; i < qs->num; i++) {
+               const struct idpf_queue_ptr *q = &qs->qs[i];
+               struct idpf_buf_queue *bufq;
+
+               switch (q->type) {
+               case VIRTCHNL2_QUEUE_TYPE_RX:
+                       err = idpf_rx_desc_alloc(vport, q->rxq);
+                       if (err)
+                               break;
+
+                       err = idpf_xdp_rxq_info_init(q->rxq);
+                       if (err)
+                               break;
+
+                       if (!splitq)
+                               err = idpf_rx_bufs_init_singleq(q->rxq);
+
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+                       bufq = q->bufq;
+
+                       err = idpf_bufq_desc_alloc(vport, bufq);
+                       if (err)
+                               break;
+
+                       for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
+                               struct idpf_buf_queue * const *bufqs;
+                               enum libeth_fqe_type type;
+                               u32 ts;
+
+                               bufqs = bufq->q_vector->bufq;
+                               if (bufqs[j] != bufq)
+                                       continue;
+
+                               if (j) {
+                                       type = LIBETH_FQE_SHORT;
+                                       ts = bufqs[j - 1]->truesize >> 1;
+                               } else {
+                                       type = LIBETH_FQE_MTU;
+                                       ts = 0;
+                               }
+
+                               bufq->truesize = ts;
+
+                               err = idpf_rx_bufs_init(bufq, type);
+                               break;
+                       }
+
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_TX:
+                       err = idpf_tx_desc_alloc(vport, q->txq);
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+                       err = idpf_compl_desc_alloc(vport, q->complq);
+                       break;
+               default:
+                       continue;
+               }
+
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
+{
+       const struct idpf_vport *vport = qs->vport;
+       struct device *dev = vport->netdev->dev.parent;
+
+       for (u32 i = 0; i < qs->num; i++) {
+               const struct idpf_queue_ptr *q = &qs->qs[i];
+
+               switch (q->type) {
+               case VIRTCHNL2_QUEUE_TYPE_RX:
+                       idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
+                       idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+                       idpf_rx_desc_rel_bufq(q->bufq, dev);
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_TX:
+                       idpf_tx_desc_rel(q->txq);
+
+                       if (idpf_queue_has(XDP, q->txq)) {
+                               q->txq->pending = 0;
+                               q->txq->xdp_tx = 0;
+                       } else {
+                               q->txq->txq_grp->num_completions_pending = 0;
+                       }
+
+                       writel(q->txq->next_to_use, q->txq->tail);
+                       break;
+               case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+                       idpf_compl_desc_rel(q->complq);
+                       q->complq->num_completions = 0;
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
+{
+       if (qv->num_txq) {
+               u32 itr;
+
+               if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
+                       itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
+               else
+                       itr = qv->tx_itr_value;
+
+               idpf_vport_intr_write_itr(qv, itr, true);
+       }
+
+       if (qv->num_rxq) {
+               u32 itr;
+
+               if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
+                       itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
+               else
+                       itr = qv->rx_itr_value;
+
+               idpf_vport_intr_write_itr(qv, itr, false);
+       }
+
+       if (qv->num_txq || qv->num_rxq)
+               idpf_vport_intr_update_itr_ena_irq(qv);
+}
+
+/**
+ * idpf_vector_to_queue_set - create a queue set associated with the given
+ *                           queue vector
+ * @qv: queue vector corresponding to the queue pair
+ *
+ * Returns a pointer to a dynamically allocated array of pointers to all
+ * queues associated with a given queue vector (@qv).
+ * Please note that the caller is responsible for freeing the memory
+ * allocated by this function using kfree().
+ *
+ * Return: &idpf_queue_set on success, %NULL in case of error.
+ */
+static struct idpf_queue_set *
+idpf_vector_to_queue_set(struct idpf_q_vector *qv)
+{
+       bool xdp = qv->vport->xdp_txq_offset;
+       struct idpf_vport *vport = qv->vport;
+       struct idpf_queue_set *qs;
+       u32 num;
+
+       num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
+       num += xdp ? qv->num_rxq * 2 : 0;
+       if (!num)
+               return NULL;
+
+       qs = idpf_alloc_queue_set(vport, num);
+       if (!qs)
+               return NULL;
+
+       num = 0;
+
+       for (u32 i = 0; i < qv->num_bufq; i++) {
+               qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+               qs->qs[num++].bufq = qv->bufq[i];
+       }
+
+       for (u32 i = 0; i < qv->num_rxq; i++) {
+               qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
+               qs->qs[num++].rxq = qv->rx[i];
+       }
+
+       for (u32 i = 0; i < qv->num_txq; i++) {
+               qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+               qs->qs[num++].txq = qv->tx[i];
+       }
+
+       for (u32 i = 0; i < qv->num_complq; i++) {
+               qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+               qs->qs[num++].complq = qv->complq[i];
+       }
+
+       if (!vport->xdp_txq_offset)
+               goto finalize;
+
+       if (xdp) {
+               for (u32 i = 0; i < qv->num_rxq; i++) {
+                       u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+
+                       qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+                       qs->qs[num++].txq = vport->txqs[idx];
+
+                       qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+                       qs->qs[num++].complq = vport->txqs[idx]->complq;
+               }
+       }
+
+finalize:
+       if (num != qs->num) {
+               kfree(qs);
+               return NULL;
+       }
+
+       return qs;
+}
+
+static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+{
+       struct idpf_vport *vport = qs->vport;
+       struct idpf_q_vector *q_vector;
+       int err;
+
+       q_vector = idpf_find_rxq_vec(vport, qid);
+
+       err = idpf_init_queue_set(qs);
+       if (err) {
+               netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
+                          qid, ERR_PTR(err));
+               return err;
+       }
+
+       err = idpf_send_config_queue_set_msg(qs);
+       if (err) {
+               netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
+                          qid, ERR_PTR(err));
+               return err;
+       }
+
+       err = idpf_send_enable_queue_set_msg(qs);
+       if (err) {
+               netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
+                          qid, ERR_PTR(err));
+               return err;
+       }
+
+       napi_enable(&q_vector->napi);
+       idpf_qvec_ena_irq(q_vector);
+
+       netif_start_subqueue(vport->netdev, qid);
+
+       return 0;
+}
+
+static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+{
+       struct idpf_vport *vport = qs->vport;
+       struct idpf_q_vector *q_vector;
+       int err;
+
+       q_vector = idpf_find_rxq_vec(vport, qid);
+       netif_stop_subqueue(vport->netdev, qid);
+
+       writel(0, q_vector->intr_reg.dyn_ctl);
+       napi_disable(&q_vector->napi);
+
+       err = idpf_send_disable_queue_set_msg(qs);
+       if (err) {
+               netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
+                          qid, ERR_PTR(err));
+               return err;
+       }
+
+       idpf_clean_queue_set(qs);
+
+       return 0;
+}
+
+/**
+ * idpf_qp_switch - enable or disable queues associated with a queue pair
+ * @vport: vport to switch the pair for
+ * @qid: index of the queue pair to switch
+ * @en: whether to enable or disable the pair
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
+{
+       struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
+       struct idpf_queue_set *qs __free(kfree) = NULL;
+
+       if (idpf_find_txq_vec(vport, qid) != q_vector)
+               return -EINVAL;
+
+       qs = idpf_vector_to_queue_set(q_vector);
+       if (!qs)
+               return -ENOMEM;
+
+       return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+}
+
 /**
  * idpf_txq_group_rel - Release all resources for txq groups
  * @vport: vport to release txq groups on
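
idpf_qp_switch() above relies on scope-based cleanup from <linux/cleanup.h>:
the __free(kfree) annotation arranges for qs to be kfree()d automatically on
every return path, including the early -EINVAL exit where qs is still NULL
(kfree(NULL) is a no-op). A minimal sketch of the pattern; struct qp_ctx and
qp_example() are hypothetical, for illustration only:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    struct qp_ctx {
            u32 qid;
    };

    static int qp_example(u32 qid)
    {
            /* Freed automatically on every return path below. */
            struct qp_ctx *ctx __free(kfree) =
                    kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return -ENOMEM;

            ctx->qid = qid;

            return qid ? 0 : -EINVAL;       /* no explicit kfree() needed */
    }
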
index 88dc3db488b1ed0c1e441c6507593615ffe70701..8faf33786747ebab465f992aac95fe06cc010718 100644 (file)
@@ -1050,6 +1050,13 @@ int idpf_config_rss(struct idpf_vport *vport);
 int idpf_init_rss(struct idpf_vport *vport);
 void idpf_deinit_rss(struct idpf_vport *vport);
 int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+                                       u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+                                       u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
                           bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
index 89d5735f42f234dc1626c82dbd4291350ad43018..180335beaae14ce2e8b2b90fc8b12aba4a4a7f5d 100644 (file)
@@ -4,6 +4,7 @@
 #include "idpf.h"
 #include "idpf_virtchnl.h"
 #include "xdp.h"
+#include "xsk.h"
 
 static int idpf_rxq_for_each(const struct idpf_vport *vport,
                             int (*fn)(struct idpf_rx_queue *rxq, void *arg),
@@ -66,6 +67,11 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
        return 0;
 }
 
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
+{
+       return __idpf_xdp_rxq_info_init(rxq, NULL);
+}
+
 int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
 {
        return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
@@ -84,6 +90,11 @@ static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
        return 0;
 }
 
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
+{
+       __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
+}
+
 void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
 {
        idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
@@ -442,6 +453,9 @@ int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        case XDP_SETUP_PROG:
                ret = idpf_xdp_setup_prog(vport, xdp);
                break;
+       case XDP_SETUP_XSK_POOL:
+               ret = idpf_xsk_pool_setup(vport, xdp);
+               break;
        default:
 notsupp:
                ret = -EOPNOTSUPP;
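
The new XDP_SETUP_XSK_POOL case is reached from the AF_XDP core rather than
from within the driver; roughly (simplified from what xp_assign_dev() in
net/xdp/xsk_buff_pool.c does at socket bind time):

    struct netdev_bpf bpf = {
            .command        = XDP_SETUP_XSK_POOL,
            .xsk.pool       = pool,         /* NULL on unbind */
            .xsk.queue_id   = queue_id,
    };

    err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
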
index 66ad83a0e85e929c79b0b67e1524f6dfdf643806..59c0391317c2a9e7d47402ff372fc2ea6aab39fd 100644 (file)
@@ -8,7 +8,9 @@
 
 #include "idpf_txrx.h"
 
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
 int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
 void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
 void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
                               struct bpf_prog *xdp_prog);
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
new file mode 100644 (file)
index 0000000..2098bf1
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <net/libeth/xsk.h>
+
+#include "idpf.h"
+#include "xsk.h"
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
+{
+       struct xsk_buff_pool *pool = bpf->xsk.pool;
+       u32 qid = bpf->xsk.queue_id;
+       bool restart;
+       int ret;
+
+       restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
+       if (!restart)
+               goto pool;
+
+       ret = idpf_qp_switch(vport, qid, false);
+       if (ret) {
+               NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+                                      "%s: failed to disable queue pair %u: %pe",
+                                      netdev_name(vport->netdev), qid,
+                                      ERR_PTR(ret));
+               return ret;
+       }
+
+pool:
+       ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
+       if (ret) {
+               NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+                                      "%s: failed to configure XSk pool for pair %u: %pe",
+                                      netdev_name(vport->netdev), qid,
+                                      ERR_PTR(ret));
+               return ret;
+       }
+
+       if (!restart)
+               return 0;
+
+       ret = idpf_qp_switch(vport, qid, true);
+       if (ret) {
+               NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+                                      "%s: failed to enable queue pair %u: %pe",
+                                      netdev_name(vport->netdev), qid,
+                                      ERR_PTR(ret));
+               goto err_dis;
+       }
+
+       return 0;
+
+err_dis:
+       libeth_xsk_setup_pool(vport->netdev, qid, false);
+
+       return ret;
+}
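
Note the rollback on the error path above: if the queue pair cannot be
re-enabled after a successful pool change, the pool is torn down again
(libeth_xsk_setup_pool() with false) rather than left half-configured.
Condensed flow of idpf_xsk_pool_setup() when a restart is needed:

    idpf_qp_switch(vport, qid, false);          /* quiesce one queue pair */
    libeth_xsk_setup_pool(netdev, qid, pool);   /* (un)map pool via libeth */
    idpf_qp_switch(vport, qid, true);           /* bring the pair back up */
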
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
new file mode 100644 (file)
index 0000000..dc42268
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XSK_H_
+#define _IDPF_XSK_H_
+
+#include <linux/types.h>
+
+struct idpf_vport;
+struct netdev_bpf;
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+
+#endif /* !_IDPF_XSK_H_ */