hinic3: Tx & Rx configuration
author    Fan Gong <gongfan1@huawei.com>
Fri, 12 Sep 2025 06:28:27 +0000 (14:28 +0800)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 16 Sep 2025 08:49:06 +0000 (10:49 +0200)
Configure Tx & Rx queue common attributes.

Co-developed-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Fan Gong <gongfan1@huawei.com>
Link: https://patch.msgid.link/22e71492cd7c819fca45200fcf4030c32f4f924d.1757653621.git.zhuyikai1@h-partners.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h

index b891290a3d6e9067f3672e66f34c574f28f4a302..20d37670e13380103b64e8149dbf7548e4f70752 100644 (file)
@@ -75,6 +75,21 @@ struct l2nic_cmd_force_pkt_drop {
        u8                   rsvd1[3];
 };
 
+struct l2nic_cmd_set_dcb_state {
+       struct mgmt_msg_head head;
+       u16                  func_id;
+       /* 0 - get dcb state, 1 - set dcb state */
+       u8                   op_code;
+       /* 0 - disable, 1 - enable dcb */
+       u8                   state;
+       /* 0 - disable, 1 - enable dcb at port level */
+       u8                   port_state;
+       u8                   rsvd[7];
+};
+
+/* IEEE 802.1Qaz std */
+#define L2NIC_DCB_COS_MAX     0x8
+
 /* Commands between NIC and fw */
 enum l2nic_cmd {
        /* FUNC CFG */
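
Note: the op_code field makes this a bidirectional command: 0 queries the current DCB state from firmware, 1 pushes a new one, and L2NIC_DCB_COS_MAX reflects the eight traffic classes of IEEE 802.1Qaz. Only the set direction is wired up in this patch (hinic3_sync_dcb_state, below). A read-side wrapper would look roughly like the following sketch; hinic3_get_dcb_state and the assumption that firmware echoes the result back into the request buffer are hypothetical, not part of this patch.

	/* Hypothetical sketch only: query the current DCB state via op_code 0,
	 * assuming firmware writes the answer back into the same buffer.
	 */
	static int hinic3_get_dcb_state(struct hinic3_hwdev *hwdev, u8 *state)
	{
		struct l2nic_cmd_set_dcb_state dcb_state = {};
		struct mgmt_msg_params msg_params = {};
		int err;

		dcb_state.op_code = 0; /* 0 - get dcb state */
		dcb_state.func_id = hinic3_global_func_id(hwdev);

		mgmt_msg_params_init_default(&msg_params, &dcb_state,
					     sizeof(dcb_state));

		err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
					       L2NIC_CMD_QOS_DCB_STATE,
					       &msg_params);
		if (err || dcb_state.head.status)
			return -EFAULT;

		*state = dcb_state.state;

		return 0;
	}
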
index dcf49afb6d4d482823a844107a2535a67f7e4997..9e1e219b2508de53f799f07fb75a70050dacdf4f 100644 (file)
@@ -184,6 +184,47 @@ static void hinic3_free_txrxq_resources(struct net_device *netdev,
        q_params->txqs_res = NULL;
 }
 
+static int hinic3_configure_txrxqs(struct net_device *netdev,
+                                  struct hinic3_dyna_txrxq_params *q_params)
+{
+       int err;
+
+       err = hinic3_configure_txqs(netdev, q_params->num_qps,
+                                   q_params->sq_depth, q_params->txqs_res);
+       if (err) {
+               netdev_err(netdev, "Failed to configure txqs\n");
+               return err;
+       }
+
+       err = hinic3_configure_rxqs(netdev, q_params->num_qps,
+                                   q_params->rq_depth, q_params->rxqs_res);
+       if (err) {
+               netdev_err(netdev, "Failed to configure rxqs\n");
+               return err;
+       }
+
+       return 0;
+}
+
+static int hinic3_configure(struct net_device *netdev)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       int err;
+
+       netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
+       netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
+       err = hinic3_set_port_mtu(netdev, netdev->mtu);
+       if (err) {
+               netdev_err(netdev, "Failed to set mtu\n");
+               return err;
+       }
+
+       /* Ensure DCB is disabled */
+       hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
+
+       return 0;
+}
+
 static int hinic3_alloc_channel_resources(struct net_device *netdev,
                                          struct hinic3_dyna_qp_params *qp_params,
                                          struct hinic3_dyna_txrxq_params *trxq_params)
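
Note: hinic3_configure() publishes the min_mtu/max_mtu bounds before programming the current MTU; once those are set, the networking core rejects out-of-range values in dev_set_mtu() before any driver callback runs, so a .ndo_change_mtu built on the same helper can stay minimal. The callback below is an illustrative sketch only, not part of this patch.

	static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
	{
		int err;

		/* Core has already validated min_mtu <= new_mtu <= max_mtu */
		err = hinic3_set_port_mtu(netdev, new_mtu);
		if (err)
			return err;

		WRITE_ONCE(netdev->mtu, new_mtu);

		return 0;
	}
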
@@ -232,14 +273,28 @@ static int hinic3_open_channel(struct net_device *netdev)
                return err;
        }
 
+       err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
+       if (err) {
+               netdev_err(netdev, "Failed to configure txrxqs\n");
+               goto err_free_qp_ctxts;
+       }
+
        err = hinic3_qps_irq_init(netdev);
        if (err) {
                netdev_err(netdev, "Failed to init txrxq irq\n");
                goto err_free_qp_ctxts;
        }
 
+       err = hinic3_configure(netdev);
+       if (err) {
+               netdev_err(netdev, "Failed to configure netdev\n");
+               goto err_uninit_qps_irq;
+       }
+
        return 0;
 
+err_uninit_qps_irq:
+       hinic3_qps_irq_uninit(netdev);
 err_free_qp_ctxts:
        hinic3_free_qp_ctxts(nic_dev);
 
@@ -288,7 +343,6 @@ static int hinic3_open(struct net_device *netdev)
 err_uninit_qps:
        hinic3_uninit_qps(nic_dev, &qp_params);
        hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
-
 err_destroy_num_qps:
        hinic3_destroy_num_qps(netdev);
 err_free_nicio_res:
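
Note: the error labels in hinic3_open_channel() unwind in strict reverse order of setup (irqs before queue-pair contexts), and a matching teardown path would mirror that order. A sketch of the expected shape, assuming a hinic3_close_channel() counterpart that is not shown in this diff:

	static void hinic3_close_channel(struct net_device *netdev)
	{
		struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

		/* Reverse of hinic3_open_channel(): irqs first, then ctxts */
		hinic3_qps_irq_uninit(netdev);
		hinic3_free_qp_ctxts(nic_dev);
	}
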
index 5b18764781d4a866f6eb9854b7dc4d997b8206d8..ed70750f5ae87065957643b4b76a1d200f5618ed 100644 (file)
@@ -289,3 +289,28 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
 
        return pkt_drop.msg_head.status;
 }
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
+{
+       struct l2nic_cmd_set_dcb_state dcb_state = {};
+       struct mgmt_msg_params msg_params = {};
+       int err;
+
+       dcb_state.op_code = op_code;
+       dcb_state.state = state;
+       dcb_state.func_id = hinic3_global_func_id(hwdev);
+
+       mgmt_msg_params_init_default(&msg_params, &dcb_state,
+                                    sizeof(dcb_state));
+
+       err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+                                      L2NIC_CMD_QOS_DCB_STATE, &msg_params);
+       if (err || dcb_state.head.status) {
+               dev_err(hwdev->dev,
+                       "Failed to set dcb state, err: %d, status: 0x%x\n",
+                       err, dcb_state.head.status);
+               return -EFAULT;
+       }
+
+       return 0;
+}
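
Note: a mailbox transport failure and a firmware-reported non-zero status are both collapsed into -EFAULT here, so callers only get a pass/fail answer; the specifics reach the log. A fragment showing the set-direction call shape under the op_code convention from hinic3_mgmt_interface.h:

	/* op_code 1 - set, state 0 - disable DCB; this is the call shape used
	 * from hinic3_configure() above, where the return value is ignored.
	 */
	hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
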
index dd1615745f02bcb0e145fbe3d85832837b1e6ba1..719b81e2bc2acf355cd5e37ef71a3d47a1514022 100644 (file)
@@ -52,4 +52,6 @@ int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
                        struct hinic3_sq_attr *attr);
 int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
 
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+
 #endif
index e81f7c19bf63fff3dd1686788e9632526ad6b72d..6cfe3bdd8ee541242bebe11929cfd3666ac20b98 100644 (file)
@@ -85,6 +85,27 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool,
        return 0;
 }
 
+/* Associate a fixed completion element with every WQE in the RQ. Every RQ
+ * WQE will always post its completion to the same place.
+ */
+static void rq_associate_cqes(struct hinic3_rxq *rxq)
+{
+       struct hinic3_queue_pages *qpages;
+       struct hinic3_rq_wqe *rq_wqe;
+       dma_addr_t cqe_dma;
+       u32 i;
+
+       qpages = &rxq->rq->wq.qpages;
+
+       for (i = 0; i < rxq->q_depth; i++) {
+               rq_wqe = get_q_element(qpages, i, NULL);
+               cqe_dma = rxq->cqe_start_paddr +
+                         i * sizeof(struct hinic3_rq_cqe);
+               rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
+               rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
+       }
+}
+
 static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx,
                           dma_addr_t dma_addr, u16 len)
 {
@@ -445,6 +466,49 @@ void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
        }
 }
 
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+                         u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic3_dyna_rxq_res *rqres;
+       struct msix_entry *msix_entry;
+       struct hinic3_rxq *rxq;
+       u16 q_id;
+       u32 pkts;
+
+       for (q_id = 0; q_id < num_rq; q_id++) {
+               rxq = &nic_dev->rxqs[q_id];
+               rqres = &rxqs_res[q_id];
+               msix_entry = &nic_dev->qps_msix_entries[q_id];
+
+               rxq->irq_id = msix_entry->vector;
+               rxq->msix_entry_idx = msix_entry->entry;
+               rxq->next_to_update = 0;
+               rxq->next_to_alloc = rqres->next_to_alloc;
+               rxq->q_depth = rq_depth;
+               rxq->delta = rxq->q_depth;
+               rxq->q_mask = rxq->q_depth - 1;
+               rxq->cons_idx = 0;
+
+               rxq->cqe_arr = rqres->cqe_start_vaddr;
+               rxq->cqe_start_paddr = rqres->cqe_start_paddr;
+               rxq->rx_info = rqres->rx_info;
+               rxq->page_pool = rqres->page_pool;
+
+               rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
+
+               rq_associate_cqes(rxq);
+
+               pkts = hinic3_rx_fill_buffers(rxq);
+               if (!pkts) {
+                       netdev_err(netdev, "Failed to fill Rx buffer\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
 int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
 {
        struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
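
Note: rxq->q_mask = q_depth - 1 here, like txq->q_mask = sq_depth - 1 in the Tx counterpart, is a valid mask only when the depth is a power of two; the depth-selection code is expected to guarantee that before this point. A defensive check would be a one-liner, sketched here as a hypothetical addition:

	/* Ring indexing via (idx & q_mask) requires a power-of-two depth */
	if (WARN_ON(!is_power_of_2(rq_depth)))
		return -EINVAL;
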
index ec3f45c3688ae8ca5a6ce11088a842ed464410a2..44ae841a3648967a7c22877dc115bae4bbf04345 100644 (file)
@@ -97,6 +97,8 @@ int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
                          u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
 void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
                          u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+                         u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
 int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
 
 #endif
index 3c63fe071999951804fa441fc13f4fb3307d6b54..dea882260b112d6887d2dceebe80c5526e946a74 100644 (file)
@@ -702,6 +702,38 @@ void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
        }
 }
 
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+                         u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+       struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic3_dyna_txq_res *tqres;
+       struct hinic3_txq *txq;
+       u16 q_id;
+       u32 idx;
+
+       for (q_id = 0; q_id < num_sq; q_id++) {
+               txq = &nic_dev->txqs[q_id];
+               tqres = &txqs_res[q_id];
+
+               txq->q_depth = sq_depth;
+               txq->q_mask = sq_depth - 1;
+
+               txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS,
+                                       sq_depth / 20);
+               txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS,
+                                        sq_depth / 10);
+
+               txq->tx_info = tqres->tx_info;
+               for (idx = 0; idx < sq_depth; idx++)
+                       txq->tx_info[idx].dma_info =
+                               &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB];
+
+               txq->sq = &nic_dev->nic_io->sq[q_id];
+       }
+
+       return 0;
+}
+
 bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
 {
        struct net_device *netdev = txq->netdev;
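
Note: the two thresholds set in hinic3_configure_txqs() form a hysteresis pair: transmission stops when free WQEBBs drop below tx_stop_thrs and resumes only after they climb back past the larger tx_start_thrs, which prevents rapid stop/wake flapping. The min() against sq_depth/20 and sq_depth/10 keeps both thresholds proportional on shallow rings (the HINIC3_DEFAULT_* values live in hinic3_tx.h and are not shown in this diff). The wake side of that check would look roughly like the sketch below, with hinic3_wq_free_wqebbs() assumed as the free-entry helper:

	/* Sketch of the completion-path wake check: only restart the queue
	 * once reclaimed entries exceed the larger (start) threshold.
	 */
	if (netif_tx_queue_stopped(netdev_get_tx_queue(netdev, q_id)) &&
	    hinic3_wq_free_wqebbs(&txq->sq->wq) >= txq->tx_start_thrs)
		netif_wake_subqueue(netdev, q_id);
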
index 9ec6968b66887d71d120e0039b9a68d82556192c..7e1b872ba752f59519e110582c0c07c322422c80 100644 (file)
@@ -137,6 +137,8 @@ int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
                          u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
 void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
                          u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+                         u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
 
 netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);