git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: hibmcge: Implement rx_poll function to receive packets
author: Jijie Shao <shaojijie@huawei.com>
Tue, 15 Oct 2024 12:35:13 +0000 (20:35 +0800)
committer: Paolo Abeni <pabeni@redhat.com>
Mon, 21 Oct 2024 09:26:53 +0000 (11:26 +0200)
Implement rx_poll function to read the rx descriptor after
receiving the rx interrupt. Adjust the skb based on the
descriptor to complete the reception of the packet.

Signed-off-by: Jijie Shao <shaojijie@huawei.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c

index cceb674bd1c8edf6ddbbdc0334f2d25ed6d3ec88..96daf058d387cde9d2add987b1dab0ff3a85550f 100644 (file)
 #define HBG_VECTOR_NUM                 4
 #define HBG_PCU_CACHE_LINE_SIZE                32
 #define HBG_TX_TIMEOUT_BUF_LEN         1024
+#define HBG_RX_DESCR                   0x01
+
+#define HBG_PACKET_HEAD_SIZE   ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
+                                 HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
 
 enum hbg_dir {
        HBG_DIR_TX = 1 << 0,
@@ -121,6 +125,7 @@ struct hbg_priv {
        struct hbg_mac mac;
        struct hbg_vector vectors;
        struct hbg_ring tx_ring;
+       struct hbg_ring rx_ring;
 };
 
 #endif
index 05c71887b25a2452a11de64a9923e32afb533bc0..05295c2ad43923eea009380289d143e45c2a16d3 100644 (file)
@@ -74,6 +74,7 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
                return -EADDRNOTAVAIL;
 
        specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
+       specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
        return 0;
 }
 
@@ -178,6 +179,10 @@ u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
                return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
                                          HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
 
+       if (dir & HBG_DIR_RX)
+               return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+                                         HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
+
        return 0;
 }
 
@@ -189,6 +194,11 @@ void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
        hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
 }
 
+/* Hand one rx buffer's DMA address to the hardware by writing it to the
+ * RX_CFF address register; the device will fill it with a received packet.
+ */
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
+{
+       hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
+}
+
 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
 {
        hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
index 508e41cce41ef047ce5bf0708cecea38734e191f..14fb39241c93ff5e8984994d187eff4477a07998 100644 (file)
@@ -54,5 +54,6 @@ void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
 void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
 u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
 void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
 
 #endif
index fed5502e51c8c372d9515ed2295bb3490129c08b..25dd25f096fe0e9f65242c485760b20430bb1aa6 100644 (file)
@@ -19,13 +19,19 @@ static void hbg_irq_handle_tx(struct hbg_priv *priv,
        napi_schedule(&priv->tx_ring.napi);
 }
 
+/* rx interrupt handler: all packet processing is deferred to the
+ * rx NAPI poll (hbg_napi_rx_poll); here we only schedule it.
+ */
+static void hbg_irq_handle_rx(struct hbg_priv *priv,
+                             struct hbg_irq_info *irq_info)
+{
+       napi_schedule(&priv->rx_ring.napi);
+}
+
 #define HBG_TXRX_IRQ_I(name, handle) \
        {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
 #define HBG_ERR_IRQ_I(name, need_print) \
        {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
 
 static struct hbg_irq_info hbg_irqs[] = {
-       HBG_TXRX_IRQ_I(RX, NULL),
+       HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
        HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
        HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
        HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
index 0b7926a2a4fa0cfd0ac76d03d46481a84c86be6b..57d81c6d763321db57e96b846c0fdec297f82223 100644 (file)
 #define HBG_REG_MAX_FRAME_LEN_M                        GENMASK(15, 0)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR           (HBG_REG_SGMII_BASE + 0x045C)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M      GENMASK(8, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M      GENMASK(24, 16)
 #define HBG_REG_TX_CFF_ADDR_0_ADDR             (HBG_REG_SGMII_BASE + 0x0488)
 #define HBG_REG_TX_CFF_ADDR_1_ADDR             (HBG_REG_SGMII_BASE + 0x048C)
 #define HBG_REG_TX_CFF_ADDR_2_ADDR             (HBG_REG_SGMII_BASE + 0x0490)
 #define HBG_REG_TX_CFF_ADDR_3_ADDR             (HBG_REG_SGMII_BASE + 0x0494)
+#define HBG_REG_RX_CFF_ADDR_ADDR               (HBG_REG_SGMII_BASE + 0x04A0)
 #define HBG_REG_RX_BUF_SIZE_ADDR               (HBG_REG_SGMII_BASE + 0x04E4)
 #define HBG_REG_RX_BUF_SIZE_M                  GENMASK(15, 0)
 #define HBG_REG_BUS_CTRL_ADDR                  (HBG_REG_SGMII_BASE + 0x04E8)
@@ -127,4 +129,15 @@ struct hbg_tx_desc {
 #define HBG_TX_DESC_W0_l4_CS_B         BIT(0)
 #define HBG_TX_DESC_W1_SEND_LEN_M      GENMASK(19, 4)
 
+/* rx descriptor written by hardware at the head of each rx buffer
+ * (see hbg_sync_data_from_hw, which reads it from skb->data)
+ */
+struct hbg_rx_desc {
+       u32 word0;
+       u32 word1; /* tag */
+       u32 word2; /* holds the received packet length, see mask below */
+       u32 word3;
+       u32 word4;
+       u32 word5;
+};
+
+/* packet length field within rx descriptor word2; non-zero means the
+ * hardware has completed writing a packet into the buffer
+ */
+#define HBG_RX_DESC_W2_PKT_LEN_M       GENMASK(31, 16)
+
 #endif
index 6f828d7e77b34d6394aea40c710ff943733900e5..f4f256a0dfea419ac61e40b831ea7b869883a9ec 100644 (file)
        _r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
 #define hbg_queue_is_empty(head, tail, ring) \
        (hbg_queue_used_num((head), (tail), (ring)) == 0)
+/* true when no free slot remains in the ring */
+#define hbg_queue_is_full(head, tail, ring) \
+       (hbg_queue_left_num((head), (tail), (ring)) == 0)
 #define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+/* advance ring->p (p is a member name: ntc or ntu) one slot with wrap-around;
+ * single-evaluation of ring via the temporary _ring
+ */
+#define hbg_queue_move_next(p, ring) ({ \
+       typeof(ring) _ring = (ring); \
+       _ring->p = hbg_queue_next_prt(_ring->p, _ring); })
 
 #define HBG_TX_STOP_THRS       2
 #define HBG_TX_START_THRS      (2 * HBG_TX_STOP_THRS)
@@ -124,6 +129,20 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
        buffer->skb = NULL;
 }
 
+/* Allocate an skb sized to the direction's max frame length.
+ *
+ * The first HBG_PACKET_HEAD_SIZE bytes are zeroed so that a stale value
+ * in the buffer head is never mistaken for a hardware-written rx
+ * descriptor (completion is detected by a non-zero packet-length field).
+ *
+ * Returns 0 on success, -ENOMEM if the skb allocation fails.
+ */
+static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
+{
+       u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
+       struct hbg_priv *priv = buffer->priv;
+
+       buffer->skb = netdev_alloc_skb(priv->netdev, len);
+       if (unlikely(!buffer->skb))
+               return -ENOMEM;
+
+       buffer->skb_len = len;
+       memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
+       return 0;
+}
+
 static void hbg_buffer_free(struct hbg_buffer *buffer)
 {
        hbg_dma_unmap(buffer);
@@ -176,6 +195,92 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
        return packet_done;
 }
 
+/* Allocate, DMA-map and post one rx buffer at the ring's ntu slot,
+ * then advance ntu.
+ *
+ * Returns 0 on success or when the ring is already full (nothing to do),
+ * a negative errno if allocation or mapping fails.
+ */
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+       struct hbg_ring *ring = &priv->rx_ring;
+       struct hbg_buffer *buffer;
+       int ret;
+
+       /* no free slot: not an error, simply skip the refill */
+       if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+               return 0;
+
+       buffer = &ring->queue[ring->ntu];
+       ret = hbg_buffer_alloc_skb(buffer);
+       if (unlikely(ret))
+               return ret;
+
+       ret = hbg_dma_map(buffer);
+       if (unlikely(ret)) {
+               /* undo the skb allocation so the slot stays clean */
+               hbg_buffer_free_skb(buffer);
+               return ret;
+       }
+
+       /* hand the mapped buffer to hardware, then claim the slot */
+       hbg_hw_fill_buffer(priv, buffer->skb_dma);
+       hbg_queue_move_next(ntu, ring);
+       return 0;
+}
+
+/* Check whether hardware has finished receiving a packet into @buffer.
+ *
+ * Syncs the buffer for CPU access and reads the rx descriptor the
+ * hardware writes at the start of the buffer; a non-zero packet-length
+ * field signals a completed reception (the head area was zeroed at
+ * allocation time, so zero reliably means "not yet written").
+ */
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+                                 struct hbg_buffer *buffer)
+{
+       struct hbg_rx_desc *rx_desc;
+
+       /* make sure HW write desc complete */
+       dma_rmb();
+
+       dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
+                               buffer->skb_len, DMA_FROM_DEVICE);
+
+       rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+       return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+/* rx NAPI poll: consume up to @budget completed buffers from the rx ring.
+ *
+ * For each completed buffer the rx descriptor is stripped, the skb is
+ * passed up via GRO, and a replacement buffer is posted to hardware.
+ * When fewer than @budget packets were processed, NAPI is completed and
+ * the rx interrupt is re-enabled.
+ *
+ * Returns the number of packets processed.
+ */
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+       struct hbg_priv *priv = ring->priv;
+       struct hbg_rx_desc *rx_desc;
+       struct hbg_buffer *buffer;
+       u32 packet_done = 0;
+       u32 pkt_len;
+
+       while (packet_done < budget) {
+               if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
+                       break;
+
+               buffer = &ring->queue[ring->ntc];
+               /* slot without an skb (earlier refill failed): just advance */
+               if (unlikely(!buffer->skb))
+                       goto next_buffer;
+
+               /* hardware has not written this buffer yet: stop polling */
+               if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+                       break;
+               rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+               pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+
+               hbg_dma_unmap(buffer);
+
+               /* skip the descriptor/head area so data starts at the packet */
+               skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+               skb_put(buffer->skb, pkt_len);
+               buffer->skb->protocol = eth_type_trans(buffer->skb,
+                                                      priv->netdev);
+
+               dev_sw_netstats_rx_add(priv->netdev, pkt_len);
+               napi_gro_receive(napi, buffer->skb);
+               /* skb ownership passed to the stack */
+               buffer->skb = NULL;
+
+next_buffer:
+               /* post a fresh buffer before releasing the consumed slot */
+               hbg_rx_fill_one_buffer(priv);
+               hbg_queue_move_next(ntc, ring);
+               packet_done++;
+       }
+
+       if (likely(packet_done < budget &&
+                  napi_complete_done(napi, packet_done)))
+               hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+       return packet_done;
+}
+
 static void hbg_ring_uninit(struct hbg_ring *ring)
 {
        struct hbg_buffer *buffer;
@@ -232,7 +337,11 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
        ring->ntu = 0;
        ring->len = len;
 
-       netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+       if (dir == HBG_DIR_TX)
+               netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+       else
+               netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+
        napi_enable(&ring->napi);
        return 0;
 }
@@ -252,14 +361,43 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
        return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
 }
 
+/* Initialize the rx ring and pre-fill len - 1 buffers, matching the
+ * one-slot reservation in hbg_queue_left_num.
+ *
+ * On any failure the ring is fully uninitialized before returning.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+       int ret;
+       u32 i;
+
+       ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < priv->rx_ring.len - 1; i++) {
+               ret = hbg_rx_fill_one_buffer(priv);
+               if (ret) {
+                       /* roll back so a partial fill leaks nothing */
+                       hbg_ring_uninit(&priv->rx_ring);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 int hbg_txrx_init(struct hbg_priv *priv)
 {
        int ret;
 
        ret = hbg_tx_ring_init(priv);
-       if (ret)
+       if (ret) {
                dev_err(&priv->pdev->dev,
                        "failed to init tx ring, ret = %d\n", ret);
+               return ret;
+       }
+
+       ret = hbg_rx_ring_init(priv);
+       if (ret) {
+               dev_err(&priv->pdev->dev,
+                       "failed to init rx ring, ret = %d\n", ret);
+               /* roll back the tx ring so a partial init leaks nothing */
+               hbg_ring_uninit(&priv->tx_ring);
+       }
 
        return ret;
 }
@@ -267,4 +405,5 @@ int hbg_txrx_init(struct hbg_priv *priv)
 void hbg_txrx_uninit(struct hbg_priv *priv)
 {
        hbg_ring_uninit(&priv->tx_ring);
+       /* rx ring is torn down together with the tx ring */
+       hbg_ring_uninit(&priv->rx_ring);
 }