idpf: add support for nointerrupt queues
author Alexander Lobakin <aleksander.lobakin@intel.com>
Tue, 26 Aug 2025 15:55:01 +0000 (17:55 +0200)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
Mon, 8 Sep 2025 18:05:17 +0000 (11:05 -0700)
Currently, queues are associated 1:1 with interrupt vectors, as it is
assumed queues are always interrupt-driven. For XDP, we want to use Tx
queues without interrupts and only do "lazy" cleaning when the number
of free ring elements is <= a threshold (the closest power of two to
1/4 of the ring size).

In order to use a queue without an interrupt, idpf still needs a
vector assigned to it to flush descriptors. A single global vector per
vport is enough to handle all of its noirq queues.

Always request one extra vector and configure it in non-interrupt mode
right away when creating a vport, so that it can be used later by
queues when needed (not only XDP ones).

Co-developed-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
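
For illustration, a minimal sketch of the "lazy" cleaning described
above; it is not part of this patch. The threshold is a power of two
near 1/4 of the ring, and completion writebacks on a noirq queue are
driven by the vport-global vector instead of a per-queue interrupt.
The helper names (idpf_noirq_txq_maybe_clean, idpf_noirq_tx_clean) are
hypothetical; rounddown_pow_of_two() is from <linux/log2.h>.

/* Illustrative sketch only, not part of this patch. */
static void idpf_noirq_txq_maybe_clean(struct idpf_tx_queue *txq, u32 free)
{
	/* A power of two near 1/4 of the ring size (rounded down here). */
	u32 thresh = rounddown_pow_of_two(txq->desc_count / 4);

	/* Clean lazily: nothing to do while enough descriptors are free. */
	if (free > thresh)
		return;

	/*
	 * Reclaim completed descriptors. No per-queue interrupt fires for
	 * a noirq queue; completion writebacks are triggered by the
	 * vport-global vector enabled once via
	 * writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl).
	 */
	idpf_noirq_tx_clean(txq, txq->desc_count);
}

That global vector is what the new noirq_dyn_ctl/noirq_dyn_ctl_ena
fields below configure in idpf_intr_reg_init() and
idpf_vf_intr_reg_init().
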
drivers/net/ethernet/intel/idpf/idpf.h
drivers/net/ethernet/intel/idpf/idpf_dev.c
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c

diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 269e9b41645a6fff649e6b3978742dcb52ce4620..2bfdf0ae24cf22dd9275718afa62c10c5cda5651 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -312,6 +312,9 @@ struct idpf_fsteer_fltr {
  * @num_q_vectors: Number of IRQ vectors allocated
  * @q_vectors: Array of queue vectors
  * @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
  * @max_mtu: device given max possible MTU
  * @default_mac_addr: device will give a default MAC to use
  * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
@@ -358,6 +361,11 @@ struct idpf_vport {
        u16 num_q_vectors;
        struct idpf_q_vector *q_vectors;
        u16 *q_vector_idxs;
+
+       void __iomem *noirq_dyn_ctl;
+       u32 noirq_dyn_ctl_ena;
+       u16 noirq_v_idx;
+
        u16 max_mtu;
        u8 default_mac_addr[ETH_ALEN];
        u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index bfa60f7d43de3f03523f345241919f3a82c7245e..3a04a6bd0d7cca32b01b3375d61d60ad555d49f9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -77,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
        int num_vecs = vport->num_q_vectors;
        struct idpf_vec_regs *reg_vals;
        int num_regs, i, err = 0;
-       u32 rx_itr, tx_itr;
+       u32 rx_itr, tx_itr, val;
        u16 total_vecs;
 
        total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -121,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
                intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
        }
 
+       /* Data vector for NOIRQ queues */
+
+       val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+       vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+       val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+             FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+       vport->noirq_dyn_ctl_ena = val;
+
 free_reg_vals:
        kfree(reg_vals);
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 2f9bc77866294cd85622cf6dc0939c4784eada8a..cad8c9426c92b4de97cec289ac5faeea1ff75716 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1142,7 +1142,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
        if (!vport)
                return vport;
 
-       num_max_q = max(max_q->max_txq, max_q->max_rxq);
+       num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
        if (!adapter->vport_config[idx]) {
                struct idpf_vport_config *vport_config;
                struct idpf_q_coalesce *q_coal;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 976c4e0b8afd1b5189c7430e54606ba5925b26f5..d9f1a73f98c8d8dba6f2ce7f4dc29fd8f729cb4f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3507,6 +3507,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
        struct idpf_q_vector *q_vector = vport->q_vectors;
        int q_idx;
 
+       writel(0, vport->noirq_dyn_ctl);
+
        for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
                writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
 }
@@ -3750,6 +3752,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
                if (qv->num_txq || qv->num_rxq)
                        idpf_vport_intr_update_itr_ena_irq(qv);
        }
+
+       writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
 }
 
 /**
@@ -4061,6 +4065,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
                for (i = 0; i < vport->num_q_vectors; i++)
                        vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
 
+               vport->noirq_v_idx = vport->q_vector_idxs[i];
+
                return 0;
        }
 
@@ -4074,6 +4080,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
        for (i = 0; i < vport->num_q_vectors; i++)
                vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
 
+       vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
        kfree(vecids);
 
        return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 1c570794e5bc6f7a961ed851568aa14ac27cc2f0..f8e579dab21a755e04f7892c2af06fcfc1d2c35c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -58,6 +58,8 @@
 #define IDPF_MBX_Q_VEC         1
 #define IDPF_MIN_Q_VEC         1
 #define IDPF_MIN_RDMA_VEC      2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS                     1
 
 #define IDPF_DFLT_TX_Q_DESC_COUNT              512
 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT         512
@@ -279,6 +281,7 @@ struct idpf_ptype_state {
  * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
  * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
  *               queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  */
 enum idpf_queue_flags_t {
@@ -289,6 +292,7 @@ enum idpf_queue_flags_t {
        __IDPF_Q_CRC_EN,
        __IDPF_Q_HSPLIT_EN,
        __IDPF_Q_PTP,
+       __IDPF_Q_NOIRQ,
 
        __IDPF_Q_FLAGS_NBITS,
 };
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 259d50fded67b11674f8c0fd0f2235f4a9697699..4cc58c83688cabf9f438b5f1102ece13df16cfd2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -76,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
        int num_vecs = vport->num_q_vectors;
        struct idpf_vec_regs *reg_vals;
        int num_regs, i, err = 0;
-       u32 rx_itr, tx_itr;
+       u32 rx_itr, tx_itr, val;
        u16 total_vecs;
 
        total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -120,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
                intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
        }
 
+       /* Data vector for NOIRQ queues */
+
+       val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+       vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+       val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+             FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+       vport->noirq_dyn_ctl_ena = val;
+
 free_reg_vals:
        kfree(reg_vals);
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 3c3c8fc0def3e079ec41688aa4987a4286ceb9fc..357358e9043abd2ef4afdb35018bf372ed68f495 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2018,21 +2018,31 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
 
                for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+                       const struct idpf_tx_queue *txq = tx_qgrp->txqs[j];
+                       const struct idpf_q_vector *vec;
+                       u32 v_idx, tx_itr_idx;
+
                        vqv[k].queue_type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
-                       vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+                       vqv[k].queue_id = cpu_to_le32(txq->q_id);
 
-                       if (idpf_is_queue_model_split(vport->txq_model)) {
-                               vqv[k].vector_id =
-                               cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
-                               vqv[k].itr_idx =
-                               cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
+                       if (idpf_queue_has(NOIRQ, txq))
+                               vec = NULL;
+                       else if (idpf_is_queue_model_split(vport->txq_model))
+                               vec = txq->txq_grp->complq->q_vector;
+                       else
+                               vec = txq->q_vector;
+
+                       if (vec) {
+                               v_idx = vec->v_idx;
+                               tx_itr_idx = vec->tx_itr_idx;
                        } else {
-                               vqv[k].vector_id =
-                               cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
-                               vqv[k].itr_idx =
-                               cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
+                               v_idx = vport->noirq_v_idx;
+                               tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
                        }
+
+                       vqv[k].vector_id = cpu_to_le16(v_idx);
+                       vqv[k].itr_idx = cpu_to_le32(tx_itr_idx);
                }
        }
 
@@ -2050,6 +2060,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 
                for (j = 0; j < num_rxq; j++, k++) {
                        struct idpf_rx_queue *rxq;
+                       u32 v_idx, rx_itr_idx;
 
                        if (idpf_is_queue_model_split(vport->rxq_model))
                                rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
@@ -2059,8 +2070,17 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
                        vqv[k].queue_type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
                        vqv[k].queue_id = cpu_to_le32(rxq->q_id);
-                       vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
-                       vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+
+                       if (idpf_queue_has(NOIRQ, rxq)) {
+                               v_idx = vport->noirq_v_idx;
+                               rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
+                       } else {
+                               v_idx = rxq->q_vector->v_idx;
+                               rx_itr_idx = rxq->q_vector->rx_itr_idx;
+                       }
+
+                       vqv[k].vector_id = cpu_to_le16(v_idx);
+                       vqv[k].itr_idx = cpu_to_le32(rx_itr_idx);
                }
        }
 
@@ -3281,9 +3301,15 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
 {
        struct idpf_vector_info vec_info;
        int num_alloc_vecs;
+       u32 req;
 
        vec_info.num_curr_vecs = vport->num_q_vectors;
-       vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+       if (vec_info.num_curr_vecs)
+               vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+       req = max(vport->num_txq, vport->num_rxq) + IDPF_RESERVED_VECS;
+       vec_info.num_req_vecs = req;
+
        vec_info.default_vport = vport->default_vport;
        vec_info.index = vport->idx;
 
@@ -3296,7 +3322,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
                return -EINVAL;
        }
 
-       vport->num_q_vectors = num_alloc_vecs;
+       vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
 
        return 0;
 }