enic: Move RX functions to their own file
author     John Daley <johndale@cisco.com>
           Wed, 5 Feb 2025 23:54:13 +0000 (15:54 -0800)
committer  Jakub Kicinski <kuba@kernel.org>
           Fri, 7 Feb 2025 01:17:01 +0000 (17:17 -0800)
Move RX handler code into its own file in preparation for further
changes. Some formatting changes were necessary in order to satisfy
checkpatch but there were no functional changes.

Co-developed-by: Nelson Escobar <neescoba@cisco.com>
Signed-off-by: Nelson Escobar <neescoba@cisco.com>
Co-developed-by: Satish Kharat <satishkh@cisco.com>
Signed-off-by: Satish Kharat <satishkh@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
Link: https://patch.msgid.link/20250205235416.25410-2-johndale@cisco.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/cisco/enic/Makefile
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/enic_rq.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_rq.h [new file with mode: 0644]
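
For context, the move makes the four RX handlers external functions declared
in the new enic_rq.h, so enic_main.c keeps using them as the vnic layer's
callbacks. A minimal sketch of that wiring, assuming the driver's existing
helpers vnic_rq_fill(), vnic_cq_service() and enic_cq_rq() (the real call
sites live in enic_main.c, outside this diff; the wrapper name below is
illustrative):

#include "enic.h"
#include "enic_rq.h"

/* Illustrative only: post RX buffers for queue i, then drain completions.
 * enic_rq_alloc_buf() and enic_rq_service() are the callbacks this patch
 * exports; each serviced CQ entry ends up in enic_rq_indicate_buf().
 */
static void enic_rq_example_wiring(struct enic *enic, unsigned int i,
				   unsigned int work_to_do)
{
	/* fill the ring, one enic_rq_alloc_buf() call per free descriptor */
	vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);

	/* service up to work_to_do RX completions on the matching CQ */
	vnic_cq_service(&enic->cq[enic_cq_rq(enic, i)], work_to_do,
			enic_rq_service, NULL);
}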

diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4458b330833ddf9ba39c61be2a2be..b3b5196b2dfcc3e59366474ba78fc7a4cd746eb0 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
        enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-       enic_ethtool.o enic_api.o enic_clsf.o
+       enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 49f6cab01ed510e7a7710640dcdf9242fe674334..1d9f109346b8c7ea50b22f9785e9acdf3d840063 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,6 +58,7 @@
 #include "enic_dev.h"
 #include "enic_pp.h"
 #include "enic_clsf.h"
+#include "enic_rq.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
@@ -1313,243 +1314,6 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
-       struct enic *enic = vnic_dev_priv(rq->vdev);
-
-       if (!buf->os_buf)
-               return;
-
-       dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-                        DMA_FROM_DEVICE);
-       dev_kfree_skb_any(buf->os_buf);
-       buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
-       struct enic *enic = vnic_dev_priv(rq->vdev);
-       struct net_device *netdev = enic->netdev;
-       struct sk_buff *skb;
-       unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
-       unsigned int os_buf_index = 0;
-       dma_addr_t dma_addr;
-       struct vnic_rq_buf *buf = rq->to_use;
-
-       if (buf->os_buf) {
-               enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
-                                  buf->len);
-
-               return 0;
-       }
-       skb = netdev_alloc_skb_ip_align(netdev, len);
-       if (!skb) {
-               enic->rq[rq->index].stats.no_skb++;
-               return -ENOMEM;
-       }
-
-       dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(enic_dma_map_check(enic, dma_addr))) {
-               dev_kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       enic_queue_rq_desc(rq, skb, os_buf_index,
-               dma_addr, len);
-
-       return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
-                                     u32 pkt_len)
-{
-       if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
-               pkt_size->large_pkt_bytes_cnt += pkt_len;
-       else
-               pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
-                            struct vnic_rq_buf *buf, u16 len)
-{
-       struct enic *enic = netdev_priv(netdev);
-       struct sk_buff *new_skb;
-
-       if (len > enic->rx_copybreak)
-               return false;
-       new_skb = netdev_alloc_skb_ip_align(netdev, len);
-       if (!new_skb)
-               return false;
-       dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
-                               DMA_FROM_DEVICE);
-       memcpy(new_skb->data, (*skb)->data, len);
-       *skb = new_skb;
-
-       return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
-       struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-       int skipped, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(rq->vdev);
-       struct net_device *netdev = enic->netdev;
-       struct sk_buff *skb;
-       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-       struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
-       u8 type, color, eop, sop, ingress_port, vlan_stripped;
-       u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-       u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-       u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-       u8 packet_error;
-       u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
-       u32 rss_hash;
-       bool outer_csum_ok = true, encap = false;
-
-       rqstats->packets++;
-       if (skipped) {
-               rqstats->desc_skip++;
-               return;
-       }
-
-       skb = buf->os_buf;
-
-       cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
-               &type, &color, &q_number, &completed_index,
-               &ingress_port, &fcoe, &eop, &sop, &rss_type,
-               &csum_not_calc, &rss_hash, &bytes_written,
-               &packet_error, &vlan_stripped, &vlan_tci, &checksum,
-               &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-               &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-               &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-               &fcs_ok);
-
-       if (packet_error) {
-
-               if (!fcs_ok) {
-                       if (bytes_written > 0)
-                               rqstats->bad_fcs++;
-                       else if (bytes_written == 0)
-                               rqstats->pkt_truncated++;
-               }
-
-               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-                                DMA_FROM_DEVICE);
-               dev_kfree_skb_any(skb);
-               buf->os_buf = NULL;
-
-               return;
-       }
-
-       if (eop && bytes_written > 0) {
-
-               /* Good receive
-                */
-               rqstats->bytes += bytes_written;
-               if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
-                       buf->os_buf = NULL;
-                       dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
-                                        buf->len, DMA_FROM_DEVICE);
-               }
-               prefetch(skb->data - NET_IP_ALIGN);
-
-               skb_put(skb, bytes_written);
-               skb->protocol = eth_type_trans(skb, netdev);
-               skb_record_rx_queue(skb, q_number);
-               if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
-                   (type == 3)) {
-                       switch (rss_type) {
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
-                               skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
-                               rqstats->l4_rss_hash++;
-                               break;
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
-                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
-                               skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
-                               rqstats->l3_rss_hash++;
-                               break;
-                       }
-               }
-               if (enic->vxlan.vxlan_udp_port_number) {
-                       switch (enic->vxlan.patch_level) {
-                       case 0:
-                               if (fcoe) {
-                                       encap = true;
-                                       outer_csum_ok = fcoe_fc_crc_ok;
-                               }
-                               break;
-                       case 2:
-                               if ((type == 7) &&
-                                   (rss_hash & BIT(0))) {
-                                       encap = true;
-                                       outer_csum_ok = (rss_hash & BIT(1)) &&
-                                                       (rss_hash & BIT(2));
-                               }
-                               break;
-                       }
-               }
-
-               /* Hardware does not provide whole packet checksum. It only
-                * provides pseudo checksum. Since hw validates the packet
-                * checksum but not provide us the checksum value. use
-                * CHECSUM_UNNECESSARY.
-                *
-                * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
-                * inner csum_ok. outer_csum_ok is set by hw when outer udp
-                * csum is correct or is zero.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
-                   tcp_udp_csum_ok && outer_csum_ok &&
-                   (ipv4_csum_ok || ipv6)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       skb->csum_level = encap;
-                       if (encap)
-                               rqstats->csum_unnecessary_encap++;
-                       else
-                               rqstats->csum_unnecessary++;
-               }
-
-               if (vlan_stripped) {
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-                       rqstats->vlan_stripped++;
-               }
-               skb_mark_napi_id(skb, &enic->napi[rq->index]);
-               if (!(netdev->features & NETIF_F_GRO))
-                       netif_receive_skb(skb);
-               else
-                       napi_gro_receive(&enic->napi[q_number], skb);
-               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-                       enic_intr_update_pkt_size(&cq->pkt_size_counter,
-                                                 bytes_written);
-       } else {
-
-               /* Buffer overflow
-                */
-               rqstats->pkt_truncated++;
-               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-                                DMA_FROM_DEVICE);
-               dev_kfree_skb_any(skb);
-               buf->os_buf = NULL;
-       }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-       u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(vdev);
-
-       vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
-               completed_index, VNIC_RQ_RETURN_DESC,
-               enic_rq_indicate_buf, opaque);
-
-       return 0;
-}
-
 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
 {
        unsigned int intr = enic_msix_rq_intr(enic, rq->index);
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 0000000..e5b2f58
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc.  All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD                1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+                                     u32 pkt_len)
+{
+       if (pkt_len >= ENIC_LARGE_PKT_THRESHOLD)
+               pkt_size->large_pkt_bytes_cnt += pkt_len;
+       else
+               pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
+                            struct vnic_rq_buf *buf, u16 len)
+{
+       struct enic *enic = netdev_priv(netdev);
+       struct sk_buff *new_skb;
+
+       if (len > enic->rx_copybreak)
+               return false;
+       new_skb = netdev_alloc_skb_ip_align(netdev, len);
+       if (!new_skb)
+               return false;
+       dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+                               DMA_FROM_DEVICE);
+       memcpy(new_skb->data, (*skb)->data, len);
+       *skb = new_skb;
+
+       return true;
+}
+
+int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
+                   u16 q_number, u16 completed_index, void *opaque)
+{
+       struct enic *enic = vnic_dev_priv(vdev);
+
+       vnic_rq_service(&enic->rq[q_number].vrq, cq_desc, completed_index,
+                       VNIC_RQ_RETURN_DESC, enic_rq_indicate_buf, opaque);
+       return 0;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+       struct enic *enic = vnic_dev_priv(rq->vdev);
+       struct net_device *netdev = enic->netdev;
+       struct sk_buff *skb;
+       unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+       unsigned int os_buf_index = 0;
+       dma_addr_t dma_addr;
+       struct vnic_rq_buf *buf = rq->to_use;
+
+       if (buf->os_buf) {
+               enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+                                  buf->len);
+
+               return 0;
+       }
+       skb = netdev_alloc_skb_ip_align(netdev, len);
+       if (!skb) {
+               enic->rq[rq->index].stats.no_skb++;
+               return -ENOMEM;
+       }
+
+       dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+                                 DMA_FROM_DEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);
+
+       return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+       struct enic *enic = vnic_dev_priv(rq->vdev);
+
+       if (!buf->os_buf)
+               return;
+
+       dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_FROM_DEVICE);
+       dev_kfree_skb_any(buf->os_buf);
+       buf->os_buf = NULL;
+}
+
+void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
+                         struct vnic_rq_buf *buf, int skipped, void *opaque)
+{
+       struct enic *enic = vnic_dev_priv(rq->vdev);
+       struct net_device *netdev = enic->netdev;
+       struct sk_buff *skb;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+
+       u8 type, color, eop, sop, ingress_port, vlan_stripped;
+       u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+       u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+       u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+       u8 packet_error;
+       u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+       u32 rss_hash;
+       bool outer_csum_ok = true, encap = false;
+
+       rqstats->packets++;
+       if (skipped) {
+               rqstats->desc_skip++;
+               return;
+       }
+
+       skb = buf->os_buf;
+
+       cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color,
+                           &q_number, &completed_index, &ingress_port, &fcoe,
+                           &eop, &sop, &rss_type, &csum_not_calc, &rss_hash,
+                           &bytes_written, &packet_error, &vlan_stripped,
+                           &vlan_tci, &checksum, &fcoe_sof, &fcoe_fc_crc_ok,
+                           &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, &udp,
+                           &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+                           &fcs_ok);
+
+       if (packet_error) {
+               if (!fcs_ok) {
+                       if (bytes_written > 0)
+                               rqstats->bad_fcs++;
+                       else if (bytes_written == 0)
+                               rqstats->pkt_truncated++;
+               }
+
+               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
+
+               return;
+       }
+
+       if (eop && bytes_written > 0) {
+               /* Good receive
+                */
+               rqstats->bytes += bytes_written;
+               if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
+                       buf->os_buf = NULL;
+                       dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+                                        buf->len, DMA_FROM_DEVICE);
+               }
+               prefetch(skb->data - NET_IP_ALIGN);
+
+               skb_put(skb, bytes_written);
+               skb->protocol = eth_type_trans(skb, netdev);
+               skb_record_rx_queue(skb, q_number);
+               if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
+                   type == 3) {
+                       switch (rss_type) {
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+                               skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+                               rqstats->l4_rss_hash++;
+                               break;
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+                       case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+                               skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+                               rqstats->l3_rss_hash++;
+                               break;
+                       }
+               }
+               if (enic->vxlan.vxlan_udp_port_number) {
+                       switch (enic->vxlan.patch_level) {
+                       case 0:
+                               if (fcoe) {
+                                       encap = true;
+                                       outer_csum_ok = fcoe_fc_crc_ok;
+                               }
+                               break;
+                       case 2:
+                               if (type == 7 &&
+                                   (rss_hash & BIT(0))) {
+                                       encap = true;
+                                       outer_csum_ok = (rss_hash & BIT(1)) &&
+                                                       (rss_hash & BIT(2));
+                               }
+                               break;
+                       }
+               }
+
+               /* Hardware does not provide whole packet checksum. It only
+                * provides pseudo checksum. Since hw validates the packet
+                * checksum but does not provide us the checksum value, use
+                * CHECKSUM_UNNECESSARY.
+                *
+                * In case of encap pkt, tcp_udp_csum_ok is the inner
+                * csum_ok. outer_csum_ok is set by hw when the outer udp
+                * csum is correct or is zero.
+                */
+               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+                   tcp_udp_csum_ok && outer_csum_ok &&
+                   (ipv4_csum_ok || ipv6)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       skb->csum_level = encap;
+                       if (encap)
+                               rqstats->csum_unnecessary_encap++;
+                       else
+                               rqstats->csum_unnecessary++;
+               }
+
+               if (vlan_stripped) {
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+                       rqstats->vlan_stripped++;
+               }
+               skb_mark_napi_id(skb, &enic->napi[rq->index]);
+               if (!(netdev->features & NETIF_F_GRO))
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&enic->napi[q_number], skb);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_intr_update_pkt_size(&cq->pkt_size_counter,
+                                                 bytes_written);
+       } else {
+               /* Buffer overflow
+                */
+               rqstats->pkt_truncated++;
+               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+               buf->os_buf = NULL;
+       }
+}
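
The csum_level assignment above leans on a core-kernel convention: with
CHECKSUM_UNNECESSARY, skb->csum_level holds the number of consecutive
checksums the hardware verified, minus one. A minimal sketch (the helper
name is illustrative, not part of this patch):

#include <linux/skbuff.h>

/* Illustrative only: a decapsulated tunnel packet whose outer and inner
 * checksums were both validated reports csum_level = 1; a plain packet
 * with one verified checksum stays at 0. "skb->csum_level = encap" in
 * enic_rq_indicate_buf() encodes exactly that, since encap is a bool.
 */
static void example_mark_rx_csum(struct sk_buff *skb, bool encap)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = encap;
}
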
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 0000000..a75d075
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc.  All rights reserved.
+ */
+
+int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
+                   u16 q_number, u16 completed_index, void *opaque);
+void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
+                         struct vnic_rq_buf *buf, int skipped, void *opaque);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
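
The moved allocator and free routine also illustrate the standard
streaming-DMA RX lifecycle: a buffer is mapped once when posted, then
either unmapped when the skb is passed up the stack or only synced for the
CPU when the copybreak path copies the data out and leaves the mapping in
place for reuse via buf->os_buf. A condensed sketch with illustrative
names (dev, addr and both helpers are stand-ins, not the driver's symbols):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: map an RX buffer for device writes; the caller must
 * check dma_mapping_error() before posting, as enic_rq_alloc_buf() does
 * via enic_dma_map_check().
 */
static dma_addr_t example_post_rx(struct device *dev, struct sk_buff *skb,
				  unsigned int len)
{
	return dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
}

/* Illustrative only: completion side of the same lifecycle. */
static void example_complete_rx(struct device *dev, dma_addr_t addr,
				unsigned int len, bool copybreak_hit)
{
	if (copybreak_hit)
		/* data was copied to a fresh skb: sync and keep the mapping */
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	else
		/* skb goes up the stack: release the mapping */
		dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
}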