* prev_pkt would be negative if there was no
* pending work.
*/
- packets = ring_stats->stats.pkts & INT_MAX;
- if (ring_stats->tx_stats.prev_pkt == packets) {
+ packets = ice_stats_read(ring_stats, pkts) & INT_MAX;
+ if (ring_stats->tx.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
* to ice_get_tx_pending()
*/
smp_rmb();
- ring_stats->tx_stats.prev_pkt =
+ ring_stats->tx.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
ice_fetch_u64_stats_per_ring(ring->ring_stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
- vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
- vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
+ vsi->tx_restart += ring->ring_stats->tx_restart_q;
+ vsi->tx_busy += ring->ring_stats->tx_busy;
+ vsi->tx_linearize += ring->ring_stats->tx_linearize;
}
}
ice_fetch_u64_stats_per_ring(ring_stats, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
+ vsi->rx_buf_failed += ring_stats->rx_buf_failed;
+ vsi->rx_page_failed += ring_stats->rx_page_failed;
}
/* update XDP Tx rings counters */
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
}
}
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->ring_stats->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx.prev_pkt = -1;
return 0;
err:
addr = libeth_rx_alloc(&fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
break;
}
addr = libeth_rx_alloc(&hdr_fq, ntu);
if (addr == DMA_MAPPING_ERROR) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
break;
/* exit if we failed to retrieve a buffer */
if (!skb) {
libeth_xdp_return_buff_slow(xdp);
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
- ++tx_ring->ring_stats->tx_stats.restart_q;
+ ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
return 0;
}
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->ring_stats->tx_stats.tx_linearize++;
+ ice_stats_inc(tx_ring->ring_stats, tx_linearize);
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(tx_ring->ring_stats, tx_busy);
return NETDEV_TX_BUSY;
}
u8 header_len;
};
-struct ice_txq_stats {
- u64 restart_q;
- u64 tx_busy;
- u64 tx_linearize;
- int prev_pkt; /* negative if no pending Tx descriptors */
-};
-
-struct ice_rxq_stats {
- u64 non_eop_descs;
- u64 alloc_page_failed;
- u64 alloc_buf_failed;
-};
-
struct ice_ring_stats {
struct rcu_head rcu; /* to avoid race on free */
struct u64_stats_sync syncp;
u64 pkts;
u64 bytes;
union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
+ struct_group(tx,
+ u64 tx_restart_q;
+ u64 tx_busy;
+ u64 tx_linearize;
+ /* negative if no pending Tx descriptors */
+ int prev_pkt;
+ );
+ struct_group(rx,
+ u64 rx_non_eop_descs;
+ u64 rx_page_failed;
+ u64 rx_buf_failed;
+ );
};
};
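The struct_group() layout above keeps each direction's counters addressable as a single block (stats->tx / stats->rx) while the flat member names remain usable at call sites. As an illustrative sketch only (this helper is not part of the patch, and its name is hypothetical), a reset path could then clear a whole group at once:

static inline void ice_stats_reset_tx(struct ice_ring_stats *stats)
{
	/* Clear every Tx counter in the group in one go; relies on
	 * struct_group(tx, ...) exposing the members as stats->tx.
	 * Assumes <linux/string.h> is available for memset().
	 */
	memset(&stats->tx, 0, sizeof(stats->tx));

	/* Restore the "no pending Tx descriptors" sentinel used by the
	 * Tx hang detection above.
	 */
	stats->tx.prev_pkt = -1;
}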
+/**
+ * ice_stats_read - Read a single ring stat value
+ * @stats: pointer to the ring_stats structure for a queue
+ * @member: the ice_ring_stats member to read
+ *
+ * Shorthand for reading a single 64-bit stat value from struct
+ * ice_ring_stats.
+ *
+ * Return: the value of the requested stat.
+ */
+#define ice_stats_read(stats, member) ({ \
+ struct ice_ring_stats *__stats = (stats); \
+ __stats->member; \
+})
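As a usage sketch (a hypothetical helper, not part of this patch), ice_stats_read() keeps per-ring summation sites compact, mirroring the VSI accumulation hunk above:

static inline void ice_sum_tx_ring_stats(struct ice_ring_stats *stats,
					 u64 *restart_q, u64 *busy,
					 u64 *linearize)
{
	/* Read each flattened Tx counter through the shorthand macro */
	*restart_q += ice_stats_read(stats, tx_restart_q);
	*busy += ice_stats_read(stats, tx_busy);
	*linearize += ice_stats_read(stats, tx_linearize);
}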
+
+/**
+ * ice_stats_inc - Increment a single ring stat value
+ * @stats: pointer to the ring_stats structure for a queue
+ * @member: the ice_ring_stats member to increment
+ *
+ * Shorthand for incrementing a single 64-bit stat value in struct
+ * ice_ring_stats.
+ */
+#define ice_stats_inc(stats, member) do { \
+ struct ice_ring_stats *__stats = (stats); \
+ __stats->member++; \
+} while (0)
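And a minimal usage sketch for ice_stats_inc() (again a hypothetical wrapper, named here only for illustration), matching the Rx allocation-failure hunks above:

static inline void ice_count_rx_alloc_fail(struct ice_rx_ring *rx_ring,
					   bool page_alloc)
{
	/* Bump the failure counter that matches the allocation type */
	if (page_alloc)
		ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
	else
		ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
}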
+
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
return ICE_XDP_CONSUMED;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
+ ice_stats_inc(rx_ring->ring_stats, rx_non_eop_descs);
return true;
}
return ICE_XDP_TX;
busy:
- xdp_ring->ring_stats->tx_stats.tx_busy++;
+ ice_stats_inc(xdp_ring->ring_stats, tx_busy);
return ICE_XDP_CONSUMED;
}
xsk_buff_free(first);
first = NULL;
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
continue;
}