git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: hns3: use atomic_t replace u32 for arq's count
authorHuazhong Tan <tanhuazhong@huawei.com>
Thu, 25 Apr 2019 12:42:49 +0000 (20:42 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 May 2019 13:44:58 +0000 (06:44 -0700)
[ Upstream commit 30780a8b1677e7409b32ae52a9a84f7d41ae6b43 ]

Since the irq handler and the mailbox task both update arq's count,
arq's count should use atomic_t instead of u32; otherwise its
value may eventually become incorrect.

Fixes: 07a0556a3a73 ("net: hns3: Changes to support ARQ(Asynchronous Receive Queue)")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c

index 691d12174902c13dd6fd9fb7a0a26fc1fa6a3273..3c7a26bb83222004435e46e58e5bca4edf1157a6 100644 (file)
@@ -102,7 +102,7 @@ struct hclgevf_mbx_arq_ring {
        struct hclgevf_dev *hdev;
        u32 head;
        u32 tail;
-       u32 count;
+       atomic_t count;
        u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
 };
 
index 4e78e8812a045feb3e58576354be1988f6bcc046..b39ff5555a30e6593a4a3ca4e6587358356a4dbd 100644 (file)
@@ -327,7 +327,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
        hdev->arq.hdev = hdev;
        hdev->arq.head = 0;
        hdev->arq.tail = 0;
-       hdev->arq.count = 0;
+       atomic_set(&hdev->arq.count, 0);
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
index 84653f58b2d1048cc6ff1e53382e46ba3812eea2..fbba8b83b36c9dd4cba3503852f2bd60f485dfec 100644 (file)
@@ -207,7 +207,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        /* we will drop the async msg if we find ARQ as full
                         * and continue with next message
                         */
-                       if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+                       if (atomic_read(&hdev->arq.count) >=
+                           HCLGE_MBX_MAX_ARQ_MSG_NUM) {
                                dev_warn(&hdev->pdev->dev,
                                         "Async Q full, dropping msg(%d)\n",
                                         req->msg[1]);
@@ -219,7 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                        memcpy(&msg_q[0], req->msg,
                               HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
                        hclge_mbx_tail_ptr_move_arq(hdev->arq);
-                       hdev->arq.count++;
+                       atomic_inc(&hdev->arq.count);
 
                        hclgevf_mbx_task_schedule(hdev);
 
@@ -296,7 +297,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                }
 
                hclge_mbx_head_ptr_move_arq(hdev->arq);
-               hdev->arq.count--;
+               atomic_dec(&hdev->arq.count);
                msg_q = hdev->arq.msg_q[hdev->arq.head];
        }
 }