]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: mana: Trigger VF reset/recovery on health check failure due to HWC timeout
authorDipayaan Roy <dipayanroy@linux.microsoft.com>
Fri, 27 Feb 2026 08:15:02 +0000 (00:15 -0800)
committerPaolo Abeni <pabeni@redhat.com>
Tue, 3 Mar 2026 10:14:22 +0000 (11:14 +0100)
The GF stats periodic query is used as a mechanism to monitor HWC
health. If this HWC command times out, it is a strong indication that
the device/SoC is in a faulty state and requires recovery.

Today, when a timeout is detected, the driver marks
hwc_timeout_occurred, clears cached stats, and stops rescheduling the
periodic work. However, the device itself is left in the same failing
state.

Extend the timeout handling path to trigger the existing MANA VF
recovery service by queueing a GDMA_EQE_HWC_RESET_REQUEST work item.
This is expected to initiate the appropriate recovery flow: a
suspend/resume first, and if that fails, a bus rescan.

This change is intentionally limited to HWC command timeouts and does
not trigger recovery for errors reported by the SoC as a normal command
response.

Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/aaFShvKnwR5FY8dH@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/gdma.h

index 37d2f108a839a439747cbda26593198a3d25d468..aef8612b73cb26cba2608e8aab9db772030cdf53 100644 (file)
@@ -490,15 +490,9 @@ static void mana_serv_reset(struct pci_dev *pdev)
                dev_info(&pdev->dev, "MANA reset cycle completed\n");
 
 out:
-       gc->in_service = false;
+       clear_bit(GC_IN_SERVICE, &gc->flags);
 }
 
-struct mana_serv_work {
-       struct work_struct serv_work;
-       struct pci_dev *pdev;
-       enum gdma_eqe_type type;
-};
-
 static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
 {
        switch (type) {
@@ -558,12 +552,42 @@ static void mana_serv_func(struct work_struct *w)
        module_put(THIS_MODULE);
 }
 
+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type)
+{
+       struct mana_serv_work *mns_wk;
+
+       if (test_and_set_bit(GC_IN_SERVICE, &gc->flags)) {
+               dev_info(gc->dev, "Already in service\n");
+               return -EBUSY;
+       }
+
+       if (!try_module_get(THIS_MODULE)) {
+               dev_info(gc->dev, "Module is unloading\n");
+               clear_bit(GC_IN_SERVICE, &gc->flags);
+               return -ENODEV;
+       }
+
+       mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+       if (!mns_wk) {
+               module_put(THIS_MODULE);
+               clear_bit(GC_IN_SERVICE, &gc->flags);
+               return -ENOMEM;
+       }
+
+       dev_info(gc->dev, "Start MANA service type:%d\n", type);
+       mns_wk->pdev = to_pci_dev(gc->dev);
+       mns_wk->type = type;
+       pci_dev_get(mns_wk->pdev);
+       INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+       schedule_work(&mns_wk->serv_work);
+       return 0;
+}
+
 static void mana_gd_process_eqe(struct gdma_queue *eq)
 {
        u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
        struct gdma_context *gc = eq->gdma_dev->gdma_context;
        struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
-       struct mana_serv_work *mns_wk;
        union gdma_eqe_info eqe_info;
        enum gdma_eqe_type type;
        struct gdma_event event;
@@ -623,30 +647,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
                                 "Service is to be processed in probe\n");
                        break;
                }
-
-               if (gc->in_service) {
-                       dev_info(gc->dev, "Already in service\n");
-                       break;
-               }
-
-               if (!try_module_get(THIS_MODULE)) {
-                       dev_info(gc->dev, "Module is unloading\n");
-                       break;
-               }
-
-               mns_wk = kzalloc_obj(*mns_wk, GFP_ATOMIC);
-               if (!mns_wk) {
-                       module_put(THIS_MODULE);
-                       break;
-               }
-
-               dev_info(gc->dev, "Start MANA service type:%d\n", type);
-               gc->in_service = true;
-               mns_wk->pdev = to_pci_dev(gc->dev);
-               mns_wk->type = type;
-               pci_dev_get(mns_wk->pdev);
-               INIT_WORK(&mns_wk->serv_work, mana_serv_func);
-               schedule_work(&mns_wk->serv_work);
+               mana_schedule_serv_work(gc, type);
                break;
 
        default:
index 933e9d681dedba951f2f8a780628ef6b347aa8cb..56ee993e3a4360c4d17345b5d8f3892db6067c66 100644 (file)
@@ -875,7 +875,7 @@ static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
        struct gdma_context *gc = ac->gdma_dev->gdma_context;
 
        /* Already in service, hence tx queue reset is not required.*/
-       if (gc->in_service)
+       if (test_bit(GC_IN_SERVICE, &gc->flags))
                return;
 
        /* Note: If there are pending queue reset work for this port(apc),
@@ -3525,6 +3525,7 @@ static void mana_gf_stats_work_handler(struct work_struct *work)
 {
        struct mana_context *ac =
                container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+       struct gdma_context *gc = ac->gdma_dev->gdma_context;
        int err;
 
        err = mana_query_gf_stats(ac);
@@ -3532,6 +3533,12 @@ static void mana_gf_stats_work_handler(struct work_struct *work)
                /* HWC timeout detected - reset stats and stop rescheduling */
                ac->hwc_timeout_occurred = true;
                memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+               dev_warn(gc->dev,
+                        "Gf stats wk handler: gf stats query timed out.\n");
+               /* As HWC timed out, indicating a faulty HW state and needs a
+                * reset.
+                */
+               mana_schedule_serv_work(gc, GDMA_EQE_HWC_RESET_REQUEST);
                return;
        }
        schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
index 766f4fb25e266dd9cd21033091188626a1d60018..ec17004b10c0634de096af59dc0c763c58572a1b 100644 (file)
@@ -215,6 +215,12 @@ enum gdma_page_type {
 
 #define GDMA_INVALID_DMA_REGION 0
 
+struct mana_serv_work {
+       struct work_struct serv_work;
+       struct pci_dev *pdev;
+       enum gdma_eqe_type type;
+};
+
 struct gdma_mem_info {
        struct device *dev;
 
@@ -386,6 +392,7 @@ struct gdma_irq_context {
 
 enum gdma_context_flags {
        GC_PROBE_SUCCEEDED      = 0,
+       GC_IN_SERVICE           = 1,
 };
 
 struct gdma_context {
@@ -411,7 +418,6 @@ struct gdma_context {
        u32                     test_event_eq_id;
 
        bool                    is_pf;
-       bool                    in_service;
 
        phys_addr_t             bar0_pa;
        void __iomem            *bar0_va;
@@ -473,6 +479,8 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
 
 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
 
+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
+
 struct gdma_wqe {
        u32 reserved    :24;
        u32 last_vbytes :8;
@@ -615,6 +623,9 @@ enum {
 /* Driver can handle hardware recovery events during probe */
 #define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
 
+/* Driver supports self recovery on Hardware Channel timeouts */
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)
+
 #define GDMA_DRV_CAP_FLAGS1 \
        (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
         GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -628,7 +639,8 @@ enum {
         GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
         GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
         GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
-        GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)
+        GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
+        GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)
 
 #define GDMA_DRV_CAP_FLAGS2 0