FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
/* RPC */
FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
stat->u.old_reg_value_64 = new_reg_value;
}
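+/* Record the current TMI counter registers as the collection baseline
+ * so that subsequent reads accumulate deltas from this point onward.
+ */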
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
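+/* Accumulate the 32-bit TMI counters (dropped frames and PTP timestamp
+ * request outcomes); these are gathered via __fbnic_get_hw_stats32() so
+ * they can be refreshed independently of the 64-bit counters.
+ */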
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
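+/* Accumulate the remaining 64-bit TMI counter (dropped bytes); the
+ * 32-bit counters are handled by fbnic_get_tmi_stats32() above.
+ */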
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
+
static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
struct fbnic_rpc_stats *rpc)
{
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
spin_lock(&fbd->hw_stats_lock);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
spin_lock(&fbd->hw_stats_lock);
__fbnic_get_hw_stats32(fbd);
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
spin_unlock(&fbd->hw_stats_lock);
struct fbnic_eth_mac_stats eth_mac;
};
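+/* Counters read from the TMI block: frame/byte drops and PTP
+ * timestamp request outcomes.
+ */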
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
struct fbnic_rpc_stats {
struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
struct fbnic_hw_stats {
struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
struct fbnic_rpc_stats rpc;
struct fbnic_rxb_stats rxb;
struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];