hinic3: Add .ndo_tx_timeout and .ndo_get_stats64
author     Fan Gong <gongfan1@huawei.com>
           Wed, 14 Jan 2026 08:38:22 +0000 (16:38 +0800)
committer  Paolo Abeni <pabeni@redhat.com>
           Tue, 20 Jan 2026 09:34:31 +0000 (10:34 +0100)
Implement the following callback functions:
.ndo_tx_timeout
.ndo_get_stats64

Use a work queue to track tx_timeout events and dump the necessary
debug information.

Co-developed-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Fan Gong <gongfan1@huawei.com>
Link: https://patch.msgid.link/ec34d2ff9b142e1e142e47700714533baf7e659c.1768375903.git.zhuyikai1@h-partners.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
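
For orientation only (not part of the patch): the commit message describes a
self-rearming delayed work item that checks an event bit set from
.ndo_tx_timeout and logs a fault report roughly once per second. Below is a
minimal standalone sketch of that pattern. All identifiers (demo_wq,
demo_periodic_work, DEMO_EVENT_TX_TIMEOUT) are hypothetical, and the sketch
uses the long-standing cancel_delayed_work_sync() on teardown where the patch
itself calls disable_delayed_work_sync().

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a self-rearming delayed work: a fast path sets an event bit
 * and a periodic handler consumes it, logs, and requeues itself every HZ.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define DEMO_EVENT_TX_TIMEOUT 0

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_periodic_work;
static unsigned long demo_event_flag;

static void demo_periodic_work_handler(struct work_struct *work)
{
	/* Consume the event bit set by the (simulated) tx_timeout path. */
	if (test_and_clear_bit(DEMO_EVENT_TX_TIMEOUT, &demo_event_flag))
		pr_info("demo: tx timeout fault event reported\n");

	/* Re-arm so the check runs roughly once per second. */
	queue_delayed_work(demo_wq, &demo_periodic_work, HZ);
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_periodic_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_periodic_work, demo_periodic_work_handler);
	queue_delayed_work(demo_wq, &demo_periodic_work, HZ);

	/* Pretend a tx timeout happened; normally .ndo_tx_timeout sets this. */
	set_bit(DEMO_EVENT_TX_TIMEOUT, &demo_event_flag);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_periodic_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Self-rearming delayed work sketch");
MODULE_LICENSE("GPL");

A single-threaded workqueue keeps the handler serialized, and
test_and_clear_bit() coalesces the report so that multiple queues timing out
before the next run produce a single log line.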
drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h

drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
index 3c15f22973feeb3affcb62bd0e19478c894b17b3..58bc561f95b33e750b3278c8c96a78eb86238053 100644
@@ -17,6 +17,15 @@ enum hinic3_event_service_type {
        HINIC3_EVENT_SRV_NIC  = 1
 };
 
+enum hinic3_fault_err_level {
+       HINIC3_FAULT_LEVEL_SERIOUS_FLR = 3,
+};
+
+enum hinic3_fault_source_type {
+       HINIC3_FAULT_SRC_HW_PHY_FAULT = 9,
+       HINIC3_FAULT_SRC_TX_TIMEOUT   = 22,
+};
+
 #define HINIC3_SRV_EVENT_TYPE(svc, type)    (((svc) << 16) | (type))
 
 /* driver-specific data of pci_dev */
drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index ce10ae7c0d9eb101923315ceb0a880754408aa3c..2bd306f09cd595a41c8cf39fd08faeabb90bfdcc 100644
@@ -108,6 +108,22 @@ static void hinic3_free_txrxqs(struct net_device *netdev)
        hinic3_free_txqs(netdev);
 }
 
+static void hinic3_periodic_work_handler(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       struct hinic3_nic_dev *nic_dev;
+
+       nic_dev = container_of(delay, struct hinic3_nic_dev, periodic_work);
+       if (test_and_clear_bit(HINIC3_EVENT_WORK_TX_TIMEOUT,
+                              &nic_dev->event_flag))
+               dev_info(nic_dev->hwdev->dev,
+                        "Fault event report, src: %u, level: %u\n",
+                        HINIC3_FAULT_SRC_TX_TIMEOUT,
+                        HINIC3_FAULT_LEVEL_SERIOUS_FLR);
+
+       queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
+}
+
 static int hinic3_init_nic_dev(struct net_device *netdev,
                               struct hinic3_hwdev *hwdev)
 {
@@ -123,6 +139,15 @@ static int hinic3_init_nic_dev(struct net_device *netdev,
        nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
        nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
 
+       nic_dev->workq = create_singlethread_workqueue(HINIC3_NIC_DEV_WQ_NAME);
+       if (!nic_dev->workq) {
+               dev_err(hwdev->dev, "Failed to initialize nic workqueue\n");
+               return -ENOMEM;
+       }
+
+       INIT_DELAYED_WORK(&nic_dev->periodic_work,
+                         hinic3_periodic_work_handler);
+
        return 0;
 }
 
@@ -276,6 +301,11 @@ static void hinic3_nic_event(struct auxiliary_device *adev,
        }
 }
 
+static void hinic3_free_nic_dev(struct hinic3_nic_dev *nic_dev)
+{
+       destroy_workqueue(nic_dev->workq);
+}
+
 static int hinic3_nic_probe(struct auxiliary_device *adev,
                            const struct auxiliary_device_id *id)
 {
@@ -316,7 +346,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
 
        err = hinic3_init_nic_io(nic_dev);
        if (err)
-               goto err_free_netdev;
+               goto err_free_nic_dev;
 
        err = hinic3_sw_init(netdev);
        if (err)
@@ -329,6 +359,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
        if (err)
                goto err_uninit_sw;
 
+       queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
        netif_carrier_off(netdev);
 
        err = register_netdev(netdev);
@@ -338,6 +369,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
        return 0;
 
 err_uninit_nic_feature:
+       disable_delayed_work_sync(&nic_dev->periodic_work);
        hinic3_update_nic_feature(nic_dev, 0);
        hinic3_set_nic_feature_to_hw(nic_dev);
 
@@ -346,7 +378,8 @@ err_uninit_sw:
 
 err_free_nic_io:
        hinic3_free_nic_io(nic_dev);
-
+err_free_nic_dev:
+       hinic3_free_nic_dev(nic_dev);
 err_free_netdev:
        free_netdev(netdev);
 
@@ -368,6 +401,9 @@ static void hinic3_nic_remove(struct auxiliary_device *adev)
        netdev = nic_dev->netdev;
        unregister_netdev(netdev);
 
+       disable_delayed_work_sync(&nic_dev->periodic_work);
+       hinic3_free_nic_dev(nic_dev);
+
        hinic3_update_nic_feature(nic_dev, 0);
        hinic3_set_nic_feature_to_hw(nic_dev);
        hinic3_sw_uninit(netdev);
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index 15ff01c1c7b7be8d5ba4356789e50b16d5d96dcf..39091f4723727816f66ca07beef8fb4c6b3f851b 100644
@@ -517,11 +517,88 @@ static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
        return 0;
 }
 
+static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic3_io_queue *sq;
+       u16 sw_pi, hw_ci;
+
+       sq = nic_dev->txqs[txqueue].sq;
+       sw_pi = hinic3_get_sq_local_pi(sq);
+       hw_ci = hinic3_get_sq_hw_ci(sq);
+       netdev_dbg(netdev,
+                  "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
+                  txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
+                  nic_dev->q_params.irq_cfg[txqueue].napi.state);
+
+       if (sw_pi != hw_ci)
+               set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
+}
+
+static void hinic3_get_stats64(struct net_device *netdev,
+                              struct rtnl_link_stats64 *stats)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       u64 bytes, packets, dropped, errors;
+       struct hinic3_txq_stats *txq_stats;
+       struct hinic3_rxq_stats *rxq_stats;
+       struct hinic3_txq *txq;
+       struct hinic3_rxq *rxq;
+       unsigned int start;
+       int i;
+
+       bytes = 0;
+       packets = 0;
+       dropped = 0;
+       for (i = 0; i < nic_dev->max_qps; i++) {
+               if (!nic_dev->txqs)
+                       break;
+
+               txq = &nic_dev->txqs[i];
+               txq_stats = &txq->txq_stats;
+               do {
+                       start = u64_stats_fetch_begin(&txq_stats->syncp);
+                       bytes += txq_stats->bytes;
+                       packets += txq_stats->packets;
+                       dropped += txq_stats->dropped;
+               } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+       }
+       stats->tx_packets = packets;
+       stats->tx_bytes   = bytes;
+       stats->tx_dropped = dropped;
+
+       bytes = 0;
+       packets = 0;
+       errors = 0;
+       dropped = 0;
+       for (i = 0; i < nic_dev->max_qps; i++) {
+               if (!nic_dev->rxqs)
+                       break;
+
+               rxq = &nic_dev->rxqs[i];
+               rxq_stats = &rxq->rxq_stats;
+               do {
+                       start = u64_stats_fetch_begin(&rxq_stats->syncp);
+                       bytes += rxq_stats->bytes;
+                       packets += rxq_stats->packets;
+                       errors += rxq_stats->csum_errors +
+                               rxq_stats->other_errors;
+                       dropped += rxq_stats->dropped;
+               } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+       }
+       stats->rx_packets = packets;
+       stats->rx_bytes   = bytes;
+       stats->rx_errors  = errors;
+       stats->rx_dropped = dropped;
+}
+
 static const struct net_device_ops hinic3_netdev_ops = {
        .ndo_open             = hinic3_open,
        .ndo_stop             = hinic3_close,
        .ndo_change_mtu       = hinic3_change_mtu,
        .ndo_set_mac_address  = hinic3_set_mac_addr,
+       .ndo_tx_timeout       = hinic3_tx_timeout,
+       .ndo_get_stats64      = hinic3_get_stats64,
        .ndo_start_xmit       = hinic3_xmit_frame,
 };
 
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 52bcf6fb14f2b7089572bb838176003a55d37d28..b8c9c325a45abcc8a7eb0979afa2ce7158ff9f9c 100644
@@ -13,6 +13,10 @@ enum hinic3_flags {
        HINIC3_RSS_ENABLE,
 };
 
+enum hinic3_event_work_flags {
+       HINIC3_EVENT_WORK_TX_TIMEOUT,
+};
+
 enum hinic3_rss_hash_type {
        HINIC3_RSS_HASH_ENGINE_TYPE_XOR  = 0,
        HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
@@ -83,9 +87,13 @@ struct hinic3_nic_dev {
 
        struct hinic3_intr_coal_info    *intr_coalesce;
 
+       struct workqueue_struct         *workq;
+       struct delayed_work             periodic_work;
        /* lock for enable/disable port */
        struct mutex                    port_state_mutex;
 
+       /* flag bits defined by hinic3_event_work_flags */
+       unsigned long                   event_flag;
        bool                            link_status_up;
 };
 
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 16c00c3bb1ed9e12e1094defcdcb6b756a4ae594..159c291fa29376f2500da937b029fd7b5af3a88e 100644
         HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
         HINIC3_LRO_PKT_HDR_LEN_IPV4)
 
+static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
+{
+       u64_stats_update_begin(&rxq_stats->syncp);
+       rxq_stats->bytes = 0;
+       rxq_stats->packets = 0;
+       rxq_stats->errors = 0;
+       rxq_stats->csum_errors = 0;
+       rxq_stats->other_errors = 0;
+       rxq_stats->dropped = 0;
+       rxq_stats->rx_buf_empty = 0;
+
+       rxq_stats->alloc_skb_err = 0;
+       rxq_stats->alloc_rx_buf_err = 0;
+       rxq_stats->restore_drop_sge = 0;
+       u64_stats_update_end(&rxq_stats->syncp);
+}
+
+static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
+{
+       struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+       u64_stats_init(&rxq_stats->syncp);
+       hinic3_rxq_clean_stats(rxq_stats);
+}
+
 int hinic3_alloc_rxqs(struct net_device *netdev)
 {
        struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -54,6 +79,8 @@ int hinic3_alloc_rxqs(struct net_device *netdev)
                rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
                rxq->q_depth = nic_dev->q_params.rq_depth;
                rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+
+               hinic3_rxq_stats_init(rxq);
        }
 
        return 0;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 44ae841a3648967a7c22877dc115bae4bbf04345..68fc237d642b4150c4acc1b436ceea4ae2206334 100644
 #define RQ_CQE_STATUS_GET(val, member) \
        FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
 
+struct hinic3_rxq_stats {
+       u64                   packets;
+       u64                   bytes;
+       u64                   errors;
+       u64                   csum_errors;
+       u64                   other_errors;
+       u64                   dropped;
+       u64                   rx_buf_empty;
+       u64                   alloc_skb_err;
+       u64                   alloc_rx_buf_err;
+       u64                   restore_drop_sge;
+       struct u64_stats_sync syncp;
+};
+
 /* RX Completion information that is provided by HW for a specific RX WQE */
 struct hinic3_rq_cqe {
        __le32 status;
@@ -59,6 +73,7 @@ struct hinic3_rxq {
        u16                     buf_len;
        u32                     buf_len_shift;
 
+       struct hinic3_rxq_stats rxq_stats;
        u32                     cons_idx;
        u32                     delta;
 
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 92c43c05e3f2dc7929063eb84a3bb08c84461a99..ef32aed7d761dbaa45c153ab7626718b630828e3 100644
 
 #define MIN_SKB_LEN                32
 
+static void hinic3_txq_clean_stats(struct hinic3_txq_stats *txq_stats)
+{
+       u64_stats_update_begin(&txq_stats->syncp);
+       txq_stats->bytes = 0;
+       txq_stats->packets = 0;
+       txq_stats->busy = 0;
+       txq_stats->dropped = 0;
+
+       txq_stats->skb_pad_err = 0;
+       txq_stats->frag_len_overflow = 0;
+       txq_stats->offload_cow_skb_err = 0;
+       txq_stats->map_frag_err = 0;
+       txq_stats->unknown_tunnel_pkt = 0;
+       txq_stats->frag_size_err = 0;
+       u64_stats_update_end(&txq_stats->syncp);
+}
+
+static void hinic3_txq_stats_init(struct hinic3_txq *txq)
+{
+       struct hinic3_txq_stats *txq_stats = &txq->txq_stats;
+
+       u64_stats_init(&txq_stats->syncp);
+       hinic3_txq_clean_stats(txq_stats);
+}
+
 int hinic3_alloc_txqs(struct net_device *netdev)
 {
        struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -40,6 +65,8 @@ int hinic3_alloc_txqs(struct net_device *netdev)
                txq->q_depth = nic_dev->q_params.sq_depth;
                txq->q_mask = nic_dev->q_params.sq_depth - 1;
                txq->dev = &pdev->dev;
+
+               hinic3_txq_stats_init(txq);
        }
 
        return 0;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 7e1b872ba752f59519e110582c0c07c322422c80..00194f2a1bcc1aaa8eebfeddf7fe9d78919198d9 100644
@@ -100,6 +100,20 @@ struct hinic3_sq_wqe_combo {
        u32                       task_type;
 };
 
+struct hinic3_txq_stats {
+       u64                   packets;
+       u64                   bytes;
+       u64                   busy;
+       u64                   dropped;
+       u64                   skb_pad_err;
+       u64                   frag_len_overflow;
+       u64                   offload_cow_skb_err;
+       u64                   map_frag_err;
+       u64                   unknown_tunnel_pkt;
+       u64                   frag_size_err;
+       struct u64_stats_sync syncp;
+};
+
 struct hinic3_dma_info {
        dma_addr_t dma;
        u32        len;
@@ -123,6 +137,8 @@ struct hinic3_txq {
 
        struct hinic3_tx_info   *tx_info;
        struct hinic3_io_queue  *sq;
+
+       struct hinic3_txq_stats txq_stats;
 } ____cacheline_aligned;
 
 struct hinic3_dyna_txq_res {