From: Ahmed Zaki
Date: Fri, 16 May 2025 22:19:09 +0000 (-0600)
Subject: iavf: convert to NAPI IRQ affinity API
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b0ca7dc0e70e31d0ecf66b508b96a7026b769ceb;p=thirdparty%2Fkernel%2Flinux.git

iavf: convert to NAPI IRQ affinity API

Commit bd7c00605ee0 ("net: move aRFS rmap management and CPU affinity
to core") allows the drivers to delegate the IRQ affinity to the NAPI
instance. However, the driver needs to use a persistent NAPI config and
explicitly set/unset the NAPI<->IRQ association.

Convert to the new IRQ affinity API.

Reviewed-by: Jacob Keller
Signed-off-by: Ahmed Zaki
Tested-by: Rafal Romanowski
Signed-off-by: Tony Nguyen
---

diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index eb86cca38be2b..a87e0c6d4017a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -114,8 +114,6 @@ struct iavf_q_vector {
 	u16 reg_idx;		/* register index of the interrupt */
 	char name[IFNAMSIZ + 15];
 	bool arm_wb_state;
-	cpumask_t affinity_mask;
-	struct irq_affinity_notify affinity_notify;
 };
 
 /* Helper macros to switch between ints/sec and what the register uses.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 01e11ac5055b0..2f501c8264b4d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -527,33 +527,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
 }
 
-/**
- * iavf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
-				     const cpumask_t *mask)
-{
-	struct iavf_q_vector *q_vector =
-		container_of(notify, struct iavf_q_vector, affinity_notify);
-
-	cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * iavf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void iavf_irq_affinity_release(struct kref *ref) {}
-
 /**
  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
  * @adapter: board private structure
@@ -568,7 +541,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
 	unsigned int vector, q_vectors;
 	unsigned int rx_int_idx = 0, tx_int_idx = 0;
 	int irq_num, err;
-	int cpu;
 
 	iavf_irq_disable(adapter);
 	/* Decrement for Other and TCP Timer vectors */
@@ -603,17 +575,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
 				 "Request_irq failed, error: %d\n", err);
 			goto free_queue_irqs;
 		}
-		/* register for affinity change notifications */
-		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
-		q_vector->affinity_notify.release =
-						   iavf_irq_affinity_release;
-		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* Spread the IRQ affinity hints across online CPUs. Note that
-		 * get_cpu_mask returns a mask with a permanent lifetime so
-		 * it's safe to use as a hint for irq_update_affinity_hint.
-		 */
-		cpu = cpumask_local_spread(q_vector->v_idx, -1);
-		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 
 	return 0;
@@ -622,8 +583,6 @@ free_queue_irqs:
 	while (vector) {
 		vector--;
 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
-		irq_set_affinity_notifier(irq_num, NULL);
-		irq_update_affinity_hint(irq_num, NULL);
 		free_irq(irq_num, &adapter->q_vectors[vector]);
 	}
 	return err;
@@ -665,6 +624,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter)
  **/
 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
 {
+	struct iavf_q_vector *q_vector;
 	int vector, irq_num, q_vectors;
 
 	if (!adapter->msix_entries)
@@ -673,10 +633,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 
 	for (vector = 0; vector < q_vectors; vector++) {
+		q_vector = &adapter->q_vectors[vector];
+		netif_napi_set_irq_locked(&q_vector->napi, -1);
 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
-		irq_set_affinity_notifier(irq_num, NULL);
-		irq_update_affinity_hint(irq_num, NULL);
-		free_irq(irq_num, &adapter->q_vectors[vector]);
+		free_irq(irq_num, q_vector);
 	}
 }
 
@@ -1847,7 +1807,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
  **/
 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
 {
-	int q_idx = 0, num_q_vectors;
+	int q_idx = 0, num_q_vectors, irq_num;
 	struct iavf_q_vector *q_vector;
 
 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
@@ -1857,14 +1817,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
 		return -ENOMEM;
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
 		q_vector = &adapter->q_vectors[q_idx];
 		q_vector->adapter = adapter;
 		q_vector->vsi = &adapter->vsi;
 		q_vector->v_idx = q_idx;
 		q_vector->reg_idx = q_idx;
-		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
-		netif_napi_add_locked(adapter->netdev, &q_vector->napi,
-				      iavf_napi_poll);
+		netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
+					     iavf_napi_poll, q_idx);
+		netif_napi_set_irq_locked(&q_vector->napi, irq_num);
 	}
 
 	return 0;
@@ -5377,6 +5338,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_alloc_etherdev;
 	}
 
+	netif_set_affinity_auto(netdev);
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	pci_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 422312b8b54a1..23e786b9793d3 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1648,7 +1648,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
 		 * continue to poll, otherwise we must stop polling so the
 		 * interrupt can move to the correct cpu.
 		 */
-		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+		if (!cpumask_test_cpu(cpu_id,
+				      &q_vector->napi.config->affinity_mask)) {
 			/* Tell napi that we are done polling */
 			napi_complete_done(napi, work_done);
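Editor's note: the snippet below is a minimal, illustrative sketch of the driver-side pattern this patch adopts, not part of the patch itself. The my_* names are placeholders; the helpers netif_set_affinity_auto(), netif_napi_add_config_locked() and netif_napi_set_irq_locked() are the calls used in the diff above, and the _locked variants assume the netdev instance lock is held, as in the driver paths the patch touches.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Placeholder per-queue vector; real drivers carry more state here. */
struct my_vec {
	struct napi_struct napi;
};

/* 1. At probe time, opt the whole device in to core-managed IRQ affinity. */
static void my_enable_core_affinity(struct net_device *netdev)
{
	netif_set_affinity_auto(netdev);
}

/* 2. When allocating a queue vector, register its NAPI instance with a
 *    persistent (index-based) config and record which IRQ backs it.
 */
static void my_setup_vector(struct net_device *netdev, struct my_vec *v,
			    int idx, int irq,
			    int (*poll)(struct napi_struct *, int))
{
	netif_napi_add_config_locked(netdev, &v->napi, poll, idx);
	netif_napi_set_irq_locked(&v->napi, irq);
}

/* 3. When releasing a traffic IRQ, break the NAPI<->IRQ association first,
 *    then free the IRQ as before.
 */
static void my_teardown_vector(struct my_vec *v, int irq)
{
	netif_napi_set_irq_locked(&v->napi, -1);
	free_irq(irq, v);
}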