git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: hns3: add log for workqueue scheduled late
authorYufeng Mo <moyufeng@huawei.com>
Wed, 24 Nov 2021 01:06:51 +0000 (09:06 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 May 2024 09:50:59 +0000 (11:50 +0200)
[ Upstream commit d9069dab207534d9f6f41993ee78a651733becea ]

When the mbx or reset message arrives, the driver is informed
through an interrupt. This task can be processed only after
the workqueue is scheduled. In some cases, this workqueue
scheduling takes a long time. As a result, the mbx or reset
service task cannot be processed in time. So add some warning
messages to improve debugging efficiency for this case.

Signed-off-by: Yufeng Mo <moyufeng@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 669554c512d2 ("net: hns3: direct return when receive a unknown mailbox message")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c

index 277d6d657c42923bc8c4b2f4e1962e562c3d4964..e1ba0ae055b0226d17899fc0af7f3c3eda35b754 100644 (file)
@@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode {
 #define HCLGE_MBX_MAX_RESP_DATA_SIZE   8U
 #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM     4
 
+#define HCLGE_RESET_SCHED_TIMEOUT      (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT        (HZ / 2)
+
 struct hclge_ring_chain_param {
        u8 ring_type;
        u8 tqp_index;
index 71b498aa327bb6b638edb375299610cfbd9e6f4e..93e55c6c4cf5eddfab694346b650d5afc541c2d8 100644 (file)
@@ -2855,16 +2855,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
-           !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+           !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+               hdev->last_mbx_scheduled = jiffies;
                mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+       }
 }
 
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
-           !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+           !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+               hdev->last_rst_scheduled = jiffies;
                mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+       }
 }
 
 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3697,6 +3701,13 @@ static void hclge_mailbox_service_task(struct hclge_dev *hdev)
            test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
                return;
 
+       if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+                                  HCLGE_MBX_SCHED_TIMEOUT))
+               dev_warn(&hdev->pdev->dev,
+                        "mbx service task is scheduled after %ums on cpu%u!\n",
+                        jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+                        smp_processor_id());
+
        hclge_mbx_handler(hdev);
 
        clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
@@ -4346,6 +4357,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
        if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
                return;
 
+       if (time_is_before_jiffies(hdev->last_rst_scheduled +
+                                  HCLGE_RESET_SCHED_TIMEOUT))
+               dev_warn(&hdev->pdev->dev,
+                        "reset service task is scheduled after %ums on cpu%u!\n",
+                        jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+                        smp_processor_id());
+
        down(&hdev->reset_sem);
        set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
 
index ba0d41091b1da0bf8509a08e6ae9e7f1226462f2..6870ccc9d9eac88d0a6fba31286390c7ed203219 100644 (file)
@@ -928,6 +928,8 @@ struct hclge_dev {
        u16 hclge_fd_rule_num;
        unsigned long serv_processed_cnt;
        unsigned long last_serv_processed;
+       unsigned long last_rst_scheduled;
+       unsigned long last_mbx_scheduled;
        unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
        enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
        u8 fd_en;
index 5182051e5414db1d7d8c7a5d3b263af5fe835193..ab6df4c1ea0f698153e7884ed5a9f127ab99b283 100644 (file)
@@ -855,6 +855,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
                    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
                        resp_msg.status = ret;
+                       if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+                                                  HCLGE_MBX_SCHED_TIMEOUT))
+                               dev_warn(&hdev->pdev->dev,
+                                        "resp vport%u mbx(%u,%u) late\n",
+                                        req->mbx_src_vfid,
+                                        req->msg.code,
+                                        req->msg.subcode);
+
                        hclge_gen_resp_to_vf(vport, req, &resp_msg);
                }