git.ipfire.org Git - thirdparty/linux.git/commitdiff
hinic3: Queue pair context initialization
authorFan Gong <gongfan1@huawei.com>
Fri, 12 Sep 2025 06:28:26 +0000 (14:28 +0800)
committerPaolo Abeni <pabeni@redhat.com>
Tue, 16 Sep 2025 08:49:06 +0000 (10:49 +0200)
Initialize queue pair context of hardware interaction.

Co-developed-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Fan Gong <gongfan1@huawei.com>
Link: https://patch.msgid.link/92b9c23f21cd37fb30066c7f075ec196e11f1fb2.1757653621.git.zhuyikai1@h-partners.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h

index e2ed167423fcf8ad18c5bbb03d94864cf7c03f79..89638813df4066893e3238840e9bf3647a824fec 100644 (file)
@@ -336,3 +336,91 @@ int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
 
        return ret;
 }
+
+/* Map a requested RX buffer size in bytes to the index of the nearest
+ * supported HW size that does not exceed it. Returns 0 and writes the
+ * index on success, -EINVAL if rx_buf_sz is below the smallest
+ * supported size (32B).
+ */
+static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
+{
+       /* Supported RX buffer sizes in bytes. Configured by array index. */
+       static const int supported_sizes[16] = {
+               [0] = 32,     [1] = 64,     [2] = 96,     [3] = 128,
+               [4] = 192,    [5] = 256,    [6] = 384,    [7] = 512,
+               [8] = 768,    [9] = 1024,   [10] = 1536,  [11] = 2048,
+               [12] = 3072,  [13] = 4096,  [14] = 8192,  [15] = 16384,
+       };
+       u16 idx;
+
+       /* Scan from biggest to smallest. Choose supported size that is equal or
+        * smaller. For smaller value HW will under-utilize posted buffers. For
+        * bigger value HW may overrun posted buffers.
+        */
+       idx = ARRAY_SIZE(supported_sizes);
+       while (idx > 0) {
+               idx--;
+               if (supported_sizes[idx] <= rx_buf_sz) {
+                       *buf_sz_idx = idx;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+/* Program the root (VAT) context for this function: SQ/RQ depths
+ * (sent as log2, so callers must pass powers of two) and the RX buffer
+ * size index. Returns 0 on success, negative errno on failure.
+ */
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+                        int rx_buf_sz)
+{
+       struct comm_cmd_set_root_ctxt root_ctxt = {};
+       struct mgmt_msg_params msg_params = {};
+       u16 buf_sz_idx;
+       int err;
+
+       err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx);
+       if (err)
+               return err;
+
+       root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+       /* Leave cmdq depth untouched; it is configured separately */
+       root_ctxt.set_cmdq_depth = 0;
+       root_ctxt.cmdq_depth = 0;
+
+       root_ctxt.lro_en = 1;
+
+       /* HW takes depths as log2 values and rx buf size as a table index */
+       root_ctxt.rq_depth  = ilog2(rq_depth);
+       root_ctxt.rx_buf_sz = buf_sz_idx;
+       root_ctxt.sq_depth  = ilog2(sq_depth);
+
+       mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+                                    sizeof(root_ctxt));
+
+       err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+                                      COMM_CMD_SET_VAT, &msg_params);
+       if (err || root_ctxt.head.status) {
+               dev_err(hwdev->dev,
+                       "Failed to set root context, err: %d, status: 0x%x\n",
+                       err, root_ctxt.head.status);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Reset the root (VAT) context for this function: all fields other than
+ * func_id are left zeroed so HW drops the queue configuration.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev)
+{
+       struct comm_cmd_set_root_ctxt root_ctxt = {};
+       struct mgmt_msg_params msg_params = {};
+       int err;
+
+       root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+       mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+                                    sizeof(root_ctxt));
+
+       err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+                                      COMM_CMD_SET_VAT, &msg_params);
+       if (err || root_ctxt.head.status) {
+               dev_err(hwdev->dev,
+                       "Failed to clean root context, err: %d, status: 0x%x\n",
+                       err, root_ctxt.head.status);
+               return -EFAULT;
+       }
+
+       return 0;
+}
index 35b93e36e004405d1ba61c723fd48955acdbd1fb..304f5691f0c2dcb4ffe3fd6657879bb9b9ec4d7f 100644 (file)
@@ -40,5 +40,8 @@ int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
                            u32 page_size);
 int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
 int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+                        int rx_buf_sz);
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
 
 #endif
index c4434efdc7f77da0ca1374a1ad50d21befe368e5..b891290a3d6e9067f3672e66f34c574f28f4a302 100644 (file)
@@ -56,6 +56,19 @@ struct l2nic_cmd_update_mac {
        u8                   new_mac[ETH_ALEN];
 };
 
+/* Mbox payload for L2NIC_CMD_SET_SQ_CI_ATTR: per-SQ completion-index
+ * write-back attributes. ci_addr carries the CI slot DMA address,
+ * presumably pre-shifted by the caller (hinic3_init_qp_ctxts shifts
+ * by 2) — confirm against the HW interface spec.
+ */
+struct l2nic_cmd_set_ci_attr {
+       struct mgmt_msg_head msg_head;
+       u16                  func_idx;
+       u8                   dma_attr_off;
+       u8                   pending_limit;
+       u8                   coalescing_time;
+       u8                   intr_en;
+       u16                  intr_idx;
+       u32                  l2nic_sqn;
+       u32                  rsvd;
+       u64                  ci_addr;
+};
+
 struct l2nic_cmd_force_pkt_drop {
        struct mgmt_msg_head msg_head;
        u8                   port;
@@ -82,6 +95,13 @@ enum l2nic_cmd {
        L2NIC_CMD_MAX                 = 256,
 };
 
+/* NIC CMDQ MODE */
+/* Opcodes for queue-context commands issued through the cmdq path */
+enum l2nic_ucode_cmd {
+       L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX  = 0,
+       L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX   = 1,
+       L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4,
+};
+
 enum hinic3_nic_feature_cap {
        HINIC3_NIC_F_CSUM           = BIT(0),
        HINIC3_NIC_F_SCTP_CRC       = BIT(1),
index 8c671089722f9b4122933ac431ecee560e51393e..dcf49afb6d4d482823a844107a2535a67f7e4997 100644 (file)
@@ -221,6 +221,39 @@ static void hinic3_free_channel_resources(struct net_device *netdev,
        hinic3_free_qps(nic_dev, qp_params);
 }
 
+/* Bring up the data path: program QP contexts in HW, then set up the
+ * per-queue IRQs. On IRQ setup failure the contexts are freed again.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int hinic3_open_channel(struct net_device *netdev)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       int err;
+
+       err = hinic3_init_qp_ctxts(nic_dev);
+       if (err) {
+               netdev_err(netdev, "Failed to init qps\n");
+               return err;
+       }
+
+       err = hinic3_qps_irq_init(netdev);
+       if (err) {
+               netdev_err(netdev, "Failed to init txrxq irq\n");
+               goto err_free_qp_ctxts;
+       }
+
+       return 0;
+
+err_free_qp_ctxts:
+       hinic3_free_qp_ctxts(nic_dev);
+
+       return err;
+}
+
+/* Tear down the data path in reverse order of hinic3_open_channel():
+ * release queue IRQs first, then free the HW QP contexts.
+ */
+static void hinic3_close_channel(struct net_device *netdev)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+       hinic3_qps_irq_uninit(netdev);
+       hinic3_free_qp_ctxts(nic_dev);
+}
+
 static int hinic3_open(struct net_device *netdev)
 {
        struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -246,8 +279,16 @@ static int hinic3_open(struct net_device *netdev)
 
        hinic3_init_qps(nic_dev, &qp_params);
 
+       err = hinic3_open_channel(netdev);
+       if (err)
+               goto err_uninit_qps;
+
        return 0;
 
+err_uninit_qps:
+       hinic3_uninit_qps(nic_dev, &qp_params);
+       hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+
 err_destroy_num_qps:
        hinic3_destroy_num_qps(netdev);
 err_free_nicio_res:
@@ -261,6 +302,7 @@ static int hinic3_close(struct net_device *netdev)
        struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
        struct hinic3_dyna_qp_params qp_params;
 
+       hinic3_close_channel(netdev);
        hinic3_uninit_qps(nic_dev, &qp_params);
        hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
 
index 049f9536cb86e5f26eaf71f849103cd422cb022e..5b18764781d4a866f6eb9854b7dc4d997b8206d8 100644 (file)
@@ -229,6 +229,41 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
                        err, mac_info.msg_head.status);
                return -EIO;
        }
+
+       return 0;
+}
+
+/* Push one SQ's completion-index write-back attributes to management FW.
+ * Interrupt fields are only forwarded when attr->intr_en is set.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr)
+{
+       struct l2nic_cmd_set_ci_attr cons_idx_attr = {};
+       struct mgmt_msg_params msg_params = {};
+       int err;
+
+       cons_idx_attr.func_idx = hinic3_global_func_id(hwdev);
+       cons_idx_attr.dma_attr_off  = attr->dma_attr_off;
+       cons_idx_attr.pending_limit = attr->pending_limit;
+       cons_idx_attr.coalescing_time  = attr->coalescing_time;
+
+       if (attr->intr_en) {
+               cons_idx_attr.intr_en = attr->intr_en;
+               cons_idx_attr.intr_idx = attr->intr_idx;
+       }
+
+       cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+       cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+       mgmt_msg_params_init_default(&msg_params, &cons_idx_attr,
+                                    sizeof(cons_idx_attr));
+
+       err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+                                      L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params);
+       if (err || cons_idx_attr.msg_head.status) {
+               dev_err(hwdev->dev,
+                       "Failed to set ci attribute table, err: %d, status: 0x%x\n",
+                       err, cons_idx_attr.msg_head.status);
+               return -EFAULT;
+       }
+
        return 0;
 }
 
index 6b6851650a37fa2cb93ac4df38c623f3974dbfc5..dd1615745f02bcb0e145fbe3d85832837b1e6ba1 100644 (file)
@@ -22,6 +22,16 @@ enum hinic3_nic_event_type {
        HINIC3_NIC_EVENT_LINK_UP   = 1,
 };
 
+/* Per-SQ completion-index write-back attributes; consumed by
+ * hinic3_set_ci_table().
+ */
+struct hinic3_sq_attr {
+       u8  dma_attr_off;
+       u8  pending_limit;
+       u8  coalescing_time;
+       u8  intr_en;
+       u16 intr_idx;
+       u32 l2nic_sqn;
+       u64 ci_dma_base;
+};
+
 int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
 int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
 bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
@@ -38,6 +48,8 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
 int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
                      u8 *new_mac, u16 vlan_id, u16 func_id);
 
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
+                       struct hinic3_sq_attr *attr);
 int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
 
 #endif
index 8f06ff5c377d1cbd6b8af62ba4f6ce00d57d617a..d86cd1ba46056f0d980f6cec7cba759ebe7ac818 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 
+#include "hinic3_cmdq.h"
 #include "hinic3_hw_comm.h"
 #include "hinic3_hw_intf.h"
 #include "hinic3_hwdev.h"
@@ -9,6 +10,11 @@
 #include "hinic3_nic_dev.h"
 #include "hinic3_nic_io.h"
 
+#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT    1
+#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME  1
+#define HINIC3_DEFAULT_DROP_THD_ON            (0xFFFF)
+#define HINIC3_DEFAULT_DROP_THD_OFF           0
+
 #define HINIC3_CI_Q_ADDR_SIZE                (64)
 
 #define HINIC3_CI_TABLE_SIZE(num_qps)  \
 #define HINIC3_CI_VADDR(base_addr, q_id)  \
        ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
 
+#define HINIC3_CI_PADDR(base_paddr, q_id)  \
+       ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define SQ_WQ_PREFETCH_MAX        1
+#define SQ_WQ_PREFETCH_MIN        1
+#define SQ_WQ_PREFETCH_THRESHOLD  16
+
+#define RQ_WQ_PREFETCH_MAX        4
+#define RQ_WQ_PREFETCH_MIN        1
+#define RQ_WQ_PREFETCH_THRESHOLD  256
+
+/* (2048 - 8) / 64 */
+#define HINIC3_Q_CTXT_MAX         31
+
+enum hinic3_qp_ctxt_type {
+       HINIC3_QP_CTXT_TYPE_SQ = 0,
+       HINIC3_QP_CTXT_TYPE_RQ = 1,
+};
+
+/* Header preceding a batch of SQ/RQ contexts in one cmdq buffer */
+struct hinic3_qp_ctxt_hdr {
+       __le16 num_queues;
+       __le16 queue_type;
+       __le16 start_qid;
+       __le16 rsvd;
+};
+
+/* HW-consumed SQ context image; fields are packed little-endian words */
+struct hinic3_sq_ctxt {
+       __le32 ci_pi;
+       __le32 drop_mode_sp;
+       __le32 wq_pfn_hi_owner;
+       __le32 wq_pfn_lo;
+
+       __le32 rsvd0;
+       __le32 pkt_drop_thd;
+       __le32 global_sq_id;
+       __le32 vlan_ceq_attr;
+
+       __le32 pref_cache;
+       __le32 pref_ci_owner;
+       __le32 pref_wq_pfn_hi_ci;
+       __le32 pref_wq_pfn_lo;
+
+       __le32 rsvd8;
+       __le32 rsvd9;
+       __le32 wq_block_pfn_hi;
+       __le32 wq_block_pfn_lo;
+};
+
+/* HW-consumed RQ context image; fields are packed little-endian words */
+struct hinic3_rq_ctxt {
+       __le32 ci_pi;
+       __le32 ceq_attr;
+       __le32 wq_pfn_hi_type_owner;
+       __le32 wq_pfn_lo;
+
+       __le32 rsvd[3];
+       __le32 cqe_sge_len;
+
+       __le32 pref_cache;
+       __le32 pref_ci_owner;
+       __le32 pref_wq_pfn_hi_ci;
+       __le32 pref_wq_pfn_lo;
+
+       __le32 pi_paddr_hi;
+       __le32 pi_paddr_lo;
+       __le32 wq_block_pfn_hi;
+       __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_sq_ctxt_block {
+       struct hinic3_qp_ctxt_hdr cmdq_hdr;
+       struct hinic3_sq_ctxt     sq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_rq_ctxt_block {
+       struct hinic3_qp_ctxt_hdr cmdq_hdr;
+       struct hinic3_rq_ctxt     rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+/* Payload for L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX: header only */
+struct hinic3_clean_queue_ctxt {
+       struct hinic3_qp_ctxt_hdr cmdq_hdr;
+       __le32                    rsvd;
+};
+
+#define SQ_CTXT_SIZE(num_sqs)  \
+       (sizeof(struct hinic3_qp_ctxt_hdr) +  \
+       (num_sqs) * sizeof(struct hinic3_sq_ctxt))
+
+#define RQ_CTXT_SIZE(num_rqs)  \
+       (sizeof(struct hinic3_qp_ctxt_hdr) +  \
+       (num_rqs) * sizeof(struct hinic3_rq_ctxt))
+
+#define SQ_CTXT_PREF_CI_HI_SHIFT           12
+#define SQ_CTXT_PREF_CI_HI(val)            ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)
+
+#define SQ_CTXT_PI_IDX_MASK                GENMASK(15, 0)
+#define SQ_CTXT_CI_IDX_MASK                GENMASK(31, 16)
+#define SQ_CTXT_CI_PI_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK          BIT(0)
+#define SQ_CTXT_MODE_PKT_DROP_MASK         BIT(1)
+#define SQ_CTXT_MODE_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK        GENMASK(19, 0)
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK         BIT(23)
+#define SQ_CTXT_WQ_PAGE_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK       GENMASK(15, 0)
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK      GENMASK(31, 16)
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK          GENMASK(12, 0)
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK      GENMASK(20, 19)
+#define SQ_CTXT_VLAN_CEQ_EN_MASK           BIT(23)
+#define SQ_CTXT_VLAN_CEQ_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK  GENMASK(13, 0)
+#define SQ_CTXT_PREF_CACHE_MAX_MASK        GENMASK(24, 14)
+#define SQ_CTXT_PREF_CACHE_MIN_MASK        GENMASK(31, 25)
+
+#define SQ_CTXT_PREF_CI_HI_MASK            GENMASK(3, 0)
+#define SQ_CTXT_PREF_OWNER_MASK            BIT(4)
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK        GENMASK(19, 0)
+#define SQ_CTXT_PREF_CI_LOW_MASK           GENMASK(31, 20)
+#define SQ_CTXT_PREF_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK       GENMASK(22, 0)
+#define SQ_CTXT_WQ_BLOCK_SET(val, member)  \
+       FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define RQ_CTXT_PI_IDX_MASK                GENMASK(15, 0)
+#define RQ_CTXT_CI_IDX_MASK                GENMASK(31, 16)
+#define RQ_CTXT_CI_PI_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK         GENMASK(30, 21)
+#define RQ_CTXT_CEQ_ATTR_EN_MASK           BIT(31)
+#define RQ_CTXT_CEQ_ATTR_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK        GENMASK(19, 0)
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK      GENMASK(29, 28)
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK         BIT(31)
+#define RQ_CTXT_WQ_PAGE_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define RQ_CTXT_CQE_LEN_MASK               GENMASK(29, 28)
+#define RQ_CTXT_CQE_LEN_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK  GENMASK(13, 0)
+#define RQ_CTXT_PREF_CACHE_MAX_MASK        GENMASK(24, 14)
+#define RQ_CTXT_PREF_CACHE_MIN_MASK        GENMASK(31, 25)
+
+#define RQ_CTXT_PREF_CI_HI_MASK            GENMASK(3, 0)
+#define RQ_CTXT_PREF_OWNER_MASK            BIT(4)
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK        GENMASK(19, 0)
+#define RQ_CTXT_PREF_CI_LOW_MASK           GENMASK(31, 20)
+#define RQ_CTXT_PREF_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK       GENMASK(22, 0)
+#define RQ_CTXT_WQ_BLOCK_SET(val, member)  \
+       FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define WQ_PAGE_PFN_SHIFT       12
+#define WQ_BLOCK_PFN_SHIFT      9
+#define WQ_PAGE_PFN(page_addr)  ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
 int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
 {
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
@@ -304,3 +490,396 @@ void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
        qp_params->rqs = nic_io->rq;
        qp_params->num_qps = nic_io->num_qps;
 }
+
+/* Fill the cmdq header that precedes a batch of num_queues SQ/RQ
+ * contexts starting at queue id q_id.
+ */
+static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
+                                         enum hinic3_qp_ctxt_type ctxt_type,
+                                         u16 num_queues, u16 q_id)
+{
+       qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
+       qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
+       qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
+       qp_ctxt_hdr->rsvd = 0;
+}
+
+/* Build the HW context image for one SQ: current CI/PI, WQ page and
+ * block PFNs (hi/lo halves), drop thresholds, and prefetch settings.
+ * The image is converted to the cmdq wire format by the caller.
+ */
+static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
+                                  struct hinic3_sq_ctxt *sq_ctxt)
+{
+       u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+       u32 wq_block_pfn_hi, wq_block_pfn_lo;
+       u32 wq_page_pfn_hi, wq_page_pfn_lo;
+       u16 pi_start, ci_start;
+
+       ci_start = hinic3_get_sq_local_ci(sq);
+       pi_start = hinic3_get_sq_local_pi(sq);
+
+       wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);
+
+       wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+       wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+       wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+       wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
+       wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+       wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+       sq_ctxt->ci_pi =
+               cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+                           SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+       sq_ctxt->drop_mode_sp =
+               cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
+                           SQ_CTXT_MODE_SET(0, PKT_DROP));
+
+       sq_ctxt->wq_pfn_hi_owner =
+               cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+                           SQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+       sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+       sq_ctxt->pkt_drop_thd =
+               cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
+                           SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));
+
+       sq_ctxt->global_sq_id =
+               cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
+                                                       GLOBAL_SQ_ID));
+
+       /* enable insert c-vlan by default */
+       sq_ctxt->vlan_ceq_attr =
+               cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
+                           SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));
+
+       sq_ctxt->rsvd0 = 0;
+
+       sq_ctxt->pref_cache =
+               cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+                           SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+                           SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+       /* CI is split: hi bits here, low 12 bits in pref_wq_pfn_hi_ci */
+       sq_ctxt->pref_ci_owner =
+               cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+                           SQ_CTXT_PREF_SET(1, OWNER));
+
+       sq_ctxt->pref_wq_pfn_hi_ci =
+               cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
+                           SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));
+
+       sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+       sq_ctxt->wq_block_pfn_hi =
+               cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+       sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+/* Split the RQ's first WQE page address and WQ block address into the
+ * hi/lo PFN halves HW expects in the context image.
+ */
+static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+                                              u32 *wq_page_pfn_hi,
+                                              u32 *wq_page_pfn_lo,
+                                              u32 *wq_block_pfn_hi,
+                                              u32 *wq_block_pfn_lo)
+{
+       u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+
+       wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
+
+       wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+       *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+       *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+       wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
+       *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+       *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+}
+
+/* Build the HW context image for one RQ: CI/PI (scaled by the normal-WQE
+ * shift), CEQ/interrupt attributes, WQ PFNs and prefetch settings.
+ */
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+                                  struct hinic3_rq_ctxt *rq_ctxt)
+{
+       u32 wq_block_pfn_hi, wq_block_pfn_lo;
+       u32 wq_page_pfn_hi, wq_page_pfn_lo;
+       u16 pi_start, ci_start;
+
+       ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+       pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+
+       hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+                                          &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+       rq_ctxt->ci_pi =
+               cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+                           RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+       rq_ctxt->ceq_attr =
+               cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+                           RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
+
+       rq_ctxt->wq_pfn_hi_type_owner =
+               cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+                           RQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+       /* use 16Byte WQE */
+       rq_ctxt->wq_pfn_hi_type_owner |=
+               cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
+       rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));
+
+       rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+       rq_ctxt->pref_cache =
+               cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+                           RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+                           RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+       /* no RQ-specific CI_HI helper is defined; the hi/lo CI split is the
+        * same as the SQ one, hence the SQ_ macro here
+        */
+       rq_ctxt->pref_ci_owner =
+               cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+                           RQ_CTXT_PREF_SET(1, OWNER));
+
+       rq_ctxt->pref_wq_pfn_hi_ci =
+               cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+                           RQ_CTXT_PREF_SET(ci_start, CI_LOW));
+
+       rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+       rq_ctxt->wq_block_pfn_hi =
+               cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+       rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+/* Push all SQ contexts to HW, batching up to HINIC3_Q_CTXT_MAX contexts
+ * per cmdq command. Returns 0 on success, negative errno on failure.
+ */
+static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+       struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+       struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic3_sq_ctxt_block *sq_ctxt_block;
+       u16 q_id, curr_id, max_ctxts, i;
+       struct hinic3_sq_ctxt *sq_ctxt;
+       struct hinic3_cmd_buf *cmd_buf;
+       struct hinic3_io_queue *sq;
+       __le64 out_param;
+       int err = 0;
+
+       cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+               return -ENOMEM;
+       }
+
+       q_id = 0;
+       while (q_id < nic_io->num_qps) {
+               sq_ctxt_block = cmd_buf->buf;
+               sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+               /* size of this batch: remaining queues, capped at the max
+                * that fits in one cmdq buffer
+                */
+               max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+                            HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+               hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+                                             HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
+                                             q_id);
+
+               for (i = 0; i < max_ctxts; i++) {
+                       curr_id = q_id + i;
+                       sq = &nic_io->sq[curr_id];
+                       hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
+               }
+
+               hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
+
+               cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
+               err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+                                             L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+                                             cmd_buf, &out_param);
+               /* a non-zero out_param is a microcode-reported failure */
+               if (err || out_param) {
+                       dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+                               err, out_param);
+                       err = -EFAULT;
+                       break;
+               }
+
+               q_id += max_ctxts;
+       }
+
+       hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/* Push all RQ contexts to HW, batching up to HINIC3_Q_CTXT_MAX contexts
+ * per cmdq command. Mirrors init_sq_ctxts().
+ * Returns 0 on success, negative errno on failure.
+ */
+static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+       struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+       struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic3_rq_ctxt_block *rq_ctxt_block;
+       u16 q_id, curr_id, max_ctxts, i;
+       struct hinic3_rq_ctxt *rq_ctxt;
+       struct hinic3_cmd_buf *cmd_buf;
+       struct hinic3_io_queue *rq;
+       __le64 out_param;
+       int err = 0;
+
+       cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+               return -ENOMEM;
+       }
+
+       q_id = 0;
+       while (q_id < nic_io->num_qps) {
+               rq_ctxt_block = cmd_buf->buf;
+               rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+               max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+                               HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+               hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+                                             HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
+                                             q_id);
+
+               for (i = 0; i < max_ctxts; i++) {
+                       curr_id = q_id + i;
+                       rq = &nic_io->rq[curr_id];
+                       hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
+               }
+
+               hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
+
+               cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
+
+               err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+                                             L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+                                             cmd_buf, &out_param);
+               /* a non-zero out_param is a microcode-reported failure */
+               if (err || out_param) {
+                       dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+                               err, out_param);
+                       err = -EFAULT;
+                       break;
+               }
+
+               q_id += max_ctxts;
+       }
+
+       hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/* Program both SQ and RQ contexts; stops at the first failure. */
+static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+       int err;
+
+       err = init_sq_ctxts(nic_dev);
+       if (err)
+               return err;
+
+       err = init_rq_ctxts(nic_dev);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/* Clear the offload (LRO/TSO) context space for all queues of the given
+ * type (SQ or RQ) with a single cmdq command covering qids
+ * [0, max_qps). Returns 0 on success, negative errno on failure.
+ */
+static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
+                                   enum hinic3_qp_ctxt_type ctxt_type)
+{
+       struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+       struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic3_clean_queue_ctxt *ctxt_block;
+       struct hinic3_cmd_buf *cmd_buf;
+       __le64 out_param;
+       int err;
+
+       cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+               return -ENOMEM;
+       }
+
+       /* One header describes the whole qid range; no per-queue payload */
+       ctxt_block = cmd_buf->buf;
+       ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
+       ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
+       ctxt_block->cmdq_hdr.start_qid = 0;
+       ctxt_block->cmdq_hdr.rsvd = 0;
+       ctxt_block->rsvd = 0;
+
+       hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
+
+       cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
+
+       err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+                                     L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
+                                     cmd_buf, &out_param);
+       if (err || out_param) {
+               dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
+                       err, out_param);
+
+               err = -EFAULT;
+       }
+
+       hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
+{
+       /* clean LRO/TSO context space */
+       /* NOTE(review): the logical OR collapses distinct errno values to
+        * 0/1; callers only test for non-zero, so this is fine as long as
+        * the specific code is never needed
+        */
+       return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
+              clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
+}
+
+/* Initialize all HW queue-pair state for this function: program SQ/RQ
+ * contexts, clean the offload (LRO/TSO) context space, set the root
+ * context (depths + RX buffer size), then configure per-SQ CI
+ * write-back attributes. On CI-table failure the root context is
+ * cleaned again. Returns 0 on success, negative errno on failure.
+ */
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+       struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+       struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic3_sq_attr sq_attr;
+       u32 rq_depth;
+       u16 q_id;
+       int err;
+
+       err = init_qp_ctxts(nic_dev);
+       if (err) {
+               dev_err(hwdev->dev, "Failed to init QP ctxts\n");
+               return err;
+       }
+
+       /* clean LRO/TSO context space */
+       err = clean_qp_offload_ctxt(nic_dev);
+       if (err) {
+               dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
+               return err;
+       }
+
+       /* root context takes the RQ depth in WQEBB units, scaled by the
+        * normal-WQE shift
+        */
+       rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
+
+       err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
+                                  nic_io->rx_buf_len);
+       if (err) {
+               dev_err(hwdev->dev, "Failed to set root context\n");
+               return err;
+       }
+
+       for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+               /* CI DMA address is shifted right by 2 — presumably HW
+                * consumes it in 4-byte units; confirm against HW spec
+                */
+               sq_attr.ci_dma_base =
+                       HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
+               sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
+               sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
+               sq_attr.intr_en = 1;
+               sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
+               sq_attr.l2nic_sqn = q_id;
+               sq_attr.dma_attr_off = 0;
+               err = hinic3_set_ci_table(hwdev, &sq_attr);
+               if (err) {
+                       dev_err(hwdev->dev, "Failed to set ci table\n");
+                       goto err_clean_root_ctxt;
+               }
+       }
+
+       return 0;
+
+err_clean_root_ctxt:
+       hinic3_clean_root_ctxt(hwdev);
+
+       return err;
+}
+
+/* Release HW QP context state; currently just resets the root context. */
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+       hinic3_clean_root_ctxt(nic_dev->hwdev);
+}
index c103095c37ef2983a2b9043b11d497585140b1bc..12eefabcf1dbbc4d7f22850f3b9f44b3f04bb427 100644 (file)
@@ -139,4 +139,7 @@ void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
 void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
                       struct hinic3_dyna_qp_params *qp_params);
 
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+
 #endif