git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ixgbe: refactor: use DECLARE_BITMAP for ring state field
author: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Thu, 22 Jan 2026 08:50:39 +0000 (09:50 +0100)
committer: Tony Nguyen <anthony.l.nguyen@intel.com>
Tue, 3 Mar 2026 16:56:05 +0000 (08:56 -0800)
Convert the ring state field from 'unsigned long' to a proper bitmap
using DECLARE_BITMAP macro, aligning with the implementation pattern
already used in the i40e driver.

This change:
- Adds __IXGBE_RING_STATE_NBITS as the bitmap size sentinel to enum
  ixgbe_ring_state_t (consistent with i40e's __I40E_RING_STATE_NBITS)
- Changes 'unsigned long state' to 'DECLARE_BITMAP(state,
  __IXGBE_RING_STATE_NBITS)' in struct ixgbe_ring
- Removes the address-of operator (&) when passing ring->state to bit
  manipulation functions, as bitmap arrays naturally decay to pointers

The change maintains functional equivalence while using the
more appropriate kernel bitmap API, consistent with other Intel Ethernet
drivers.

Signed-off-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

index dce4936708eb4402d3573bbfd4a869dcc7d560e5..59a1cee40b43674eebaec489a52ad786132c4d7a 100644 (file)
@@ -322,10 +322,11 @@ enum ixgbe_ring_state_t {
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_TX_XDP_RING,
        __IXGBE_TX_DISABLED,
+       __IXGBE_RING_STATE_NBITS, /* must be last */
 };
 
 #define ring_uses_build_skb(ring) \
-       test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+       test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, (ring)->state)
 
 struct ixgbe_fwd_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -336,23 +337,23 @@ struct ixgbe_fwd_adapter {
 };
 
 #define check_for_tx_hang(ring) \
-       test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+       test_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
 #define set_check_for_tx_hang(ring) \
-       set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+       set_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
 #define clear_check_for_tx_hang(ring) \
-       clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+       clear_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
 #define ring_is_rsc_enabled(ring) \
-       test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+       test_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
 #define set_ring_rsc_enabled(ring) \
-       set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+       set_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
 #define clear_ring_rsc_enabled(ring) \
-       clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+       clear_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
 #define ring_is_xdp(ring) \
-       test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+       test_bit(__IXGBE_TX_XDP_RING, (ring)->state)
 #define set_ring_xdp(ring) \
-       set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+       set_bit(__IXGBE_TX_XDP_RING, (ring)->state)
 #define clear_ring_xdp(ring) \
-       clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+       clear_bit(__IXGBE_TX_XDP_RING, (ring)->state)
 struct ixgbe_ring {
        struct ixgbe_ring *next;        /* pointer to next ring in q_vector */
        struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
@@ -364,7 +365,7 @@ struct ixgbe_ring {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
-       unsigned long state;
+       DECLARE_BITMAP(state, __IXGBE_RING_STATE_NBITS);
        u8 __iomem *tail;
        dma_addr_t dma;                 /* phys. address of descriptor ring */
        unsigned int size;              /* length in bytes */
@@ -453,7 +454,7 @@ struct ixgbe_ring_feature {
  */
 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+       if (test_bit(__IXGBE_RX_3K_BUFFER, ring->state))
                return IXGBE_RXBUFFER_3K;
 #if (PAGE_SIZE < 8192)
        if (ring_uses_build_skb(ring))
@@ -465,7 +466,7 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
-       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+       if (test_bit(__IXGBE_RX_3K_BUFFER, ring->state))
                return 1;
 #endif
        return 0;
index 87abb36f50c1342fc1f8ad2878e8bf93825849a0..1db4bd5cc2ba6bff69b1c53d3c824456bed1a2d7 100644 (file)
@@ -976,7 +976,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                 * can be marked as checksum errors.
                 */
                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
+                       set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, ring->state);
 
 #ifdef IXGBE_FCOE
                if (adapter->netdev->fcoe_mtu) {
@@ -984,7 +984,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((rxr_idx >= f->offset) &&
                            (rxr_idx < f->offset + f->indices))
-                               set_bit(__IXGBE_RX_FCOE, &ring->state);
+                               set_bit(__IXGBE_RX_FCOE, ring->state);
                }
 
 #endif /* IXGBE_FCOE */
index e4101c59074d685b202fd948e69b22be4bcf35e5..0bc806aaed9056c382f657bb28b9e44d86b2f805 100644 (file)
@@ -968,7 +968,7 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                clear_bit(__IXGBE_HANG_CHECK_ARMED,
-                         &adapter->tx_ring[i]->state);
+                         adapter->tx_ring[i]->state);
 }
 
 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1011,7 +1011,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 
                tc = tx_ring->dcb_tc;
                if (xoff[tc])
-                       clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED, tx_ring->state);
        }
 
        for (i = 0; i < adapter->num_xdp_queues; i++) {
@@ -1019,7 +1019,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 
                tc = xdp_ring->dcb_tc;
                if (xoff[tc])
-                       clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED, xdp_ring->state);
        }
 }
 
@@ -1103,11 +1103,11 @@ static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
        if (tx_done_old == tx_done && tx_pending)
                /* make sure it is true for two checks in a row */
                return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
-                                       &tx_ring->state);
+                                       tx_ring->state);
        /* update completed stats and continue */
        tx_ring->tx_stats.tx_done_old = tx_done;
        /* reset the countdown */
-       clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+       clear_bit(__IXGBE_HANG_CHECK_ARMED, tx_ring->state);
 
        return false;
 }
@@ -1660,7 +1660,7 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
 {
        __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-       return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
+       return test_bit(__IXGBE_RX_FCOE, ring->state) &&
               ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
                (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                             IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1708,7 +1708,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
                 * checksum errors.
                 */
                if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
-                   test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
+                   test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, ring->state))
                        return;
 
                ring->rx_stats.csum_err++;
@@ -3526,7 +3526,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *ring = adapter->tx_ring[i];
                                if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                                                      &ring->state))
+                                                      ring->state))
                                        reinit_count++;
                        }
                        if (reinit_count) {
@@ -3952,13 +3952,13 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                ring->atr_sample_rate = adapter->atr_sample_rate;
                ring->atr_count = 0;
-               set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+               set_bit(__IXGBE_TX_FDIR_INIT_DONE, ring->state);
        } else {
                ring->atr_sample_rate = 0;
        }
 
        /* initialize XPS */
-       if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+       if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, ring->state)) {
                struct ixgbe_q_vector *q_vector = ring->q_vector;
 
                if (q_vector)
@@ -3967,7 +3967,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                                            ring->queue_index);
        }
 
-       clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+       clear_bit(__IXGBE_HANG_CHECK_ARMED, ring->state);
 
        /* reinitialize tx_buffer_info */
        memset(ring->tx_buffer_info, 0,
@@ -4173,7 +4173,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                        srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
+       } else if (test_bit(__IXGBE_RX_3K_BUFFER, rx_ring->state)) {
                srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        } else {
                srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
@@ -4558,7 +4558,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                 * higher than the MTU of the PF.
                 */
                if (ring_uses_build_skb(ring) &&
-                   !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+                   !test_bit(__IXGBE_RX_3K_BUFFER, ring->state))
                        rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
                                  IXGBE_RXDCTL_RLPML_EN;
 #endif
@@ -4733,27 +4733,27 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                rx_ring = adapter->rx_ring[i];
 
                clear_ring_rsc_enabled(rx_ring);
-               clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
-               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+               clear_bit(__IXGBE_RX_3K_BUFFER, rx_ring->state);
+               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, rx_ring->state);
 
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
 
-               if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
-                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+               if (test_bit(__IXGBE_RX_FCOE, rx_ring->state))
+                       set_bit(__IXGBE_RX_3K_BUFFER, rx_ring->state);
 
                if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
                        continue;
 
-               set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+               set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, rx_ring->state);
 
 #if (PAGE_SIZE < 8192)
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+                       set_bit(__IXGBE_RX_3K_BUFFER, rx_ring->state);
 
                if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
                    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
-                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+                       set_bit(__IXGBE_RX_3K_BUFFER, rx_ring->state);
 #endif
        }
 }
@@ -7944,10 +7944,10 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->state));
+                               adapter->tx_ring[i]->state);
                for (i = 0; i < adapter->num_xdp_queues; i++)
                        set_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                               &adapter->xdp_ring[i]->state);
+                               adapter->xdp_ring[i]->state);
                /* re-enable flow director interrupts */
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
        } else {
@@ -9488,7 +9488,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
 
        /* add the ATR filter if ATR is on */
-       if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+       if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, tx_ring->state))
                ixgbe_atr(tx_ring, first);
 
 #ifdef IXGBE_FCOE
@@ -9528,7 +9528,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
 
        tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
-       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, tx_ring->state)))
                return NETDEV_TX_BUSY;
 
        return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
@@ -11013,7 +11013,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
        if (unlikely(!ring))
                return -ENXIO;
 
-       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, ring->state)))
                return -ENXIO;
 
        if (static_branch_unlikely(&ixgbe_xdp_locking_key))
@@ -11119,7 +11119,7 @@ static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
 static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
                              struct ixgbe_ring *tx_ring)
 {
-       set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       set_bit(__IXGBE_TX_DISABLED, tx_ring->state);
        ixgbe_disable_txr_hw(adapter, tx_ring);
 }
 
@@ -11273,9 +11273,9 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
                ixgbe_configure_tx_ring(adapter, xdp_ring);
        ixgbe_configure_rx_ring(adapter, rx_ring);
 
-       clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       clear_bit(__IXGBE_TX_DISABLED, tx_ring->state);
        if (xdp_ring)
-               clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+               clear_bit(__IXGBE_TX_DISABLED, xdp_ring->state);
 
        /* Rx/Tx/XDP Tx share the same napi context. */
        napi_enable(&rx_ring->q_vector->napi);
index 7b941505a9d02451f0bafe527633c7529928504c..89f96c463f0224f00b1738c8f01c792b099be0f0 100644 (file)
@@ -524,7 +524,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 
        ring = adapter->xdp_ring[qid];
 
-       if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
+       if (test_bit(__IXGBE_TX_DISABLED, ring->state))
                return -ENETDOWN;
 
        if (!ring->xsk_pool)