hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
- /* qmprc is not cleared on read, manual reset it */
- hwstats->qmprc = 0;
for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
i < wx->mac.max_rx_queues; i++)
- hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
+ hwstats->qmprc += rd32_wrap(wx, WX_PX_MPRC(i),
+ &wx->last_stats.qmprc[i]);
spin_unlock(&wx->hw_stats_lock);
}
{
u16 i = 0;
- for (i = 0; i < wx->mac.max_rx_queues; i++)
+ for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
+ i < wx->mac.max_rx_queues; i++) {
wr32(wx, WX_PX_MPRC(i), 0);
+ wx->last_stats.qmprc[i] = 0;
+ }
rd32(wx, WX_RDM_PKT_CNT);
rd32(wx, WX_TDM_PKT_CNT);
u64 fdirmiss;
};
+/* Snapshot of the raw hardware counter values from the previous statistics
+ * read.  rd32_wrap() uses these to convert the free-running 32-bit registers
+ * into per-interval deltas that can be accumulated without losing counts on
+ * counter wrap.
+ */
+struct wx_last_stats {
+	u32 qmprc[128]; /* per-queue WX_PX_MPRC; assumes max_rx_queues <= 128 — TODO confirm */
+};
+
enum wx_state {
WX_STATE_RESETTING,
WX_STATE_SWFW_BUSY,
bool default_up;
struct wx_hw_stats stats;
+ struct wx_last_stats last_stats;
spinlock_t hw_stats_lock; /* spinlock for accessing to hw stats */
u64 tx_busy;
u64 non_eop_descs;
return wr32(wx, reg + 0xB500, value);
}
+/* rd32_wrap - read a 32-bit hardware counter and return the delta since the
+ * previous read
+ * @wx: device handle
+ * @reg: counter register offset
+ * @last: storage for the previous raw value, updated in place
+ *
+ * The counter is free-running (not clear-on-read), so the caller accumulates
+ * the returned delta.  Unsigned 32-bit subtraction yields the correct delta
+ * even when the hardware counter wraps between reads.
+ */
+static inline u32
+rd32_wrap(struct wx *wx, u32 reg, u32 *last)
+{
+	u32 val, delta;
+
+	val = rd32(wx, reg);
+	delta = val - *last;
+	*last = val;
+
+	return delta;
+}
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/