static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct netdev_queue_stats_rx *rx)
{
+ u64 bytes, packets, alloc_fail, alloc_fail_bdq;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
- u64 bytes, packets, alloc_fail;
u64 csum_complete, csum_none;
+ struct fbnic_q_triad *qt;
unsigned int start;

if (!rxr)
return;

+ /* fbn->rx points to completion queues */
+ qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
stats = &rxr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
alloc_fail = stats->rx.alloc_failed;
csum_complete = stats->rx.csum_complete;
csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));

+ stats = &qt->sub0.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
+ stats = &qt->sub1.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
rx->bytes = bytes;
rx->packets = packets;
rx->alloc_fail = alloc_fail;
rx->csum_complete = csum_complete;
rx->csum_none = csum_none;
}

In the device-wide base stats callback, alloc_fail likewise folds in the new BDQ aggregate:

rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
- rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+ fbn->bdq_stats.bdq.alloc_failed;
rx->csum_complete = fbn->rx_stats.rx.csum_complete;
rx->csum_none = fbn->rx_stats.rx.csum_none;
}
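For context: each counter snapshot above uses the kernel's u64_stats_sync read-side retry loop, which lets a reader pull a consistent multi-field snapshot without locking the writer's fast path. A minimal sketch of the pattern, using a hypothetical example_stats struct rather than the fbnic types:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stats block, illustrative only (not fbnic code). */
struct example_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

/* Read side: retry the snapshot until the sequence is stable. */
static void example_stats_read(const struct example_stats *s,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}

On 64-bit kernels the begin/retry pair compiles down to almost nothing; it only does real work where 64-bit counter reads can tear.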
netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
if (!netmem) {
u64_stats_update_begin(&bdq->stats.syncp);
- bdq->stats.rx.alloc_failed++;
+ bdq->stats.bdq.alloc_failed++;
u64_stats_update_end(&bdq->stats.syncp);
break;
}
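The writer side, shown in the hunk above, is the mirror image: datapath increments are bracketed by u64_stats_update_begin()/u64_stats_update_end() so readers can detect and retry a torn snapshot. A sketch reusing the hypothetical example_stats struct from the previous sketch:

static void example_stats_inc_packets(struct example_stats *s)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        u64_stats_update_end(&s->syncp);
}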
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ struct fbnic_queue_stats *stats = &bdq->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->bdq_stats.bdq) / 8 != 1);
+}
+
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
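Aside: the BUILD_BUG_ON in the new helper is a compile-time tripwire. If a counter is later added to the bdq group without teaching the aggregation code about it, sizeof() grows by 8 and the build breaks. An illustrative sketch with a hypothetical counter struct (not the fbnic layout):

#include <linux/build_bug.h>
#include <linux/types.h>

struct example_bdq_counters {
        u64 alloc_failed;
        /* adding another u64 here trips the guard below */
};

static inline void example_aggregate(struct example_bdq_counters *dst,
                                     const struct example_bdq_counters *src)
{
        dst->alloc_failed += src->alloc_failed;
        /* Remember to add new stats here */
        BUILD_BUG_ON(sizeof(*src) / 8 != 1);
}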
fbn->rx[rxr->q_idx] = NULL;
}
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ if (!(bdq->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
{
page_pool_destroy(qt->sub0.page_pool);
page_pool_destroy(qt->sub1.page_pool);
}

for (j = 0; j < nv->rxt_count; j++, i++) {
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
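With teardown split out, bring-up has to mark the BDQ sub-rings as stats-carrying; the next hunk sets FBNIC_RING_F_STATS on both at ring-init time so fbnic_remove_bdq_ring() actually aggregates them: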
while (rxt_count) {
/* Configure header queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
- fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure payload queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
- fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure Rx completion queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
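Finally, the queue API restart path switches to the BDQ-specific helper as well, so counters accumulated by the outgoing rings are folded into the BDQ totals before the ring memory is swapped: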
real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
nv = fbn->napi[idx % fbn->num_napi];
- fbnic_aggregate_ring_rx_counters(fbn, &real->sub0);
- fbnic_aggregate_ring_rx_counters(fbn, &real->sub1);
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
memcpy(real, qmem, sizeof(*real));
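Both fbnic_get_queue_stats_rx() and the restart path recover the triad from the stored completion-ring pointer via container_of(). Assuming the layout implied by those calls (this struct definition is inferred for illustration, not copied from the driver):

#include <linux/container_of.h>

/* Inferred layout: three rings embedded in one triad. */
struct fbnic_q_triad {
        struct fbnic_ring sub0; /* header buffer descriptor queue */
        struct fbnic_ring sub1; /* payload buffer descriptor queue */
        struct fbnic_ring cmpl; /* Rx completion queue */
};

/* fbn->rx[idx] stores &qt->cmpl; container_of() walks back to the
 * enclosing triad, giving stats code access to both BDQ sub-rings.
 */
static inline struct fbnic_q_triad *qt_from_cmpl(struct fbnic_ring *cmpl)
{
        return container_of(cmpl, struct fbnic_q_triad, cmpl);
}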