bnxt_en: Synchronize tx when xdp redirects happen on same ring
Author:     Pavan Chebbi <pavan.chebbi@broadcom.com>
AuthorDate: Sat, 2 Apr 2022 00:21:10 +0000 (20:21 -0400)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 13 Apr 2022 18:03:14 +0000 (20:03 +0200)
[ Upstream commit 4f81def272de17dc4bbd89ac38f49b2676c9b3d2 ]

If there are more CPUs than the number of TX XDP rings, multiple XDP
redirects can select the same TX ring based on the CPU on which
XDP redirect is called.  Add locking when needed and use a static
key to decide whether to take the lock.
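
The collision is easy to see from the ring selection in bnxt_xdp_xmit()
below, ring = smp_processor_id() % bp->tx_nr_rings_xdp: with more CPUs
than rings, distinct CPUs map to the same ring. A minimal userspace
sketch of that mapping (the CPU and ring counts are assumed,
illustrative values, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
            int num_cpus = 8;         /* assumed: more CPUs than XDP TX rings */
            int tx_nr_rings_xdp = 4;  /* assumed XDP TX ring count */
            int cpu;

            for (cpu = 0; cpu < num_cpus; cpu++)
                    printf("cpu %d -> tx ring %d\n",
                           cpu, cpu % tx_nr_rings_xdp);
            /* cpus 0 and 4 both map to ring 0; concurrent bnxt_xdp_xmit()
             * calls on those CPUs would race on one ring without a lock. */
            return 0;
    }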

Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support")
Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fab8dd73fa84c8e040f1386f3d1dac9fd3a2895e..fdbcd48d991df17bbef4fe8e23093230d8cf3595 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3196,6 +3196,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
                }
                qidx = bp->tc_to_qidx[j];
                ring->queue_id = bp->q_info[qidx].queue_id;
+               spin_lock_init(&txr->xdp_tx_lock);
                if (i < bp->tx_nr_rings_xdp)
                        continue;
                if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -10274,6 +10275,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        if (irq_re_init)
                udp_tunnel_nic_reset_ntf(bp->dev);
 
+       if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+               if (!static_key_enabled(&bnxt_xdp_locking_key))
+                       static_branch_enable(&bnxt_xdp_locking_key);
+       } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+               static_branch_disable(&bnxt_xdp_locking_key);
+       }
        set_bit(BNXT_STATE_OPEN, &bp->state);
        bnxt_enable_int(bp);
        /* Enable TX queues */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2846d14756671a753ffa89d2dc9c9819960288a4..5f4a0bb36af3f1fc1afdb0f17f1c0cdb8b0530c0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -800,6 +800,8 @@ struct bnxt_tx_ring_info {
        u32                     dev_state;
 
        struct bnxt_ring_struct tx_ring_struct;
+       /* Synchronize simultaneous xdp_xmit on same ring */
+       spinlock_t              xdp_tx_lock;
 };
 
 #define BNXT_LEGACY_COAL_CMPL_PARAMS                                   \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c8083df5e0ab848c8431c6bbd79f91de1b0f1f65..c59e46c7a1ca14b2d2df823e595fd07675e5907b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -20,6 +20,8 @@
 #include "bnxt.h"
 #include "bnxt_xdp.h"
 
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len)
@@ -227,6 +229,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
        ring = smp_processor_id() % bp->tx_nr_rings_xdp;
        txr = &bp->tx_ring[ring];
 
+       if (static_branch_unlikely(&bnxt_xdp_locking_key))
+               spin_lock(&txr->xdp_tx_lock);
+
        for (i = 0; i < num_frames; i++) {
                struct xdp_frame *xdp = frames[i];
 
@@ -250,6 +255,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        }
 
+       if (static_branch_unlikely(&bnxt_xdp_locking_key))
+               spin_unlock(&txr->xdp_tx_lock);
+
        return nxmit;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05024b68116679b6194173f73e1524..067bb5e821f542bbc24e51cd7f303667f1a8ade5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
 #ifndef BNXT_XDP_H
 #define BNXT_XDP_H
 
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len);
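
Taken together, the change is an instance of a general pattern: guard a
lock that is only needed in a minority configuration behind a static
key, so the per-packet fast path costs a single patched-out branch when
the key is disabled. A condensed sketch of the shape used above (kernel
C; bnxt_update_xdp_locking() and bnxt_xdp_ring_lock() are hypothetical
helper names introduced here for illustration — the driver inlines this
logic at the call sites shown in the diff, and struct bnxt comes from
the driver's bnxt.h):

    #include <linux/jump_label.h>
    #include <linux/spinlock.h>
    #include <linux/cpumask.h>
    #include "bnxt.h"

    DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

    /* Open path (slow): flip the key only when CPUs can outnumber
     * XDP TX rings, i.e. when two CPUs may pick the same ring. */
    static void bnxt_update_xdp_locking(struct bnxt *bp)
    {
            if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
                    if (!static_key_enabled(&bnxt_xdp_locking_key))
                            static_branch_enable(&bnxt_xdp_locking_key);
            } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
                    static_branch_disable(&bnxt_xdp_locking_key);
            }
    }

    /* Transmit path (hot): a patched no-op branch when disabled. */
    static void bnxt_xdp_ring_lock(struct bnxt_tx_ring_info *txr)
    {
            if (static_branch_unlikely(&bnxt_xdp_locking_key))
                    spin_lock(&txr->xdp_tx_lock);
    }

static_branch_enable() rewrites kernel text and may sleep, which is why
the key is flipped in __bnxt_open_nic() rather than per packet;
re-evaluating it on every open also keeps it in sync when the ring
count changes across a close/open cycle.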