xsk: make xsk_buff_add_frag() really add the frag via __xdp_buff_add_frag()
author		Alexander Lobakin <aleksander.lobakin@intel.com>
		Wed, 18 Dec 2024 17:44:32 +0000 (18:44 +0100)
committer	Jakub Kicinski <kuba@kernel.org>
		Fri, 20 Dec 2024 03:51:14 +0000 (19:51 -0800)
Currently, xsk_buff_add_frag() only adds the frag to the pool's linked
list and does nothing with the &xdp_buff itself. The drivers do that
part manually, and the logic is identical in each of them.

Make it really add an skb frag, just like xdp_buff_add_frag() does, and
free the frags on error if needed. This allows removing the duplicated
code from i40e and ice and avoids adding the same code to every new
driver.

Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20241218174435.1445282-5-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
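
For reference, a minimal sketch of how a zero-copy Rx loop consumes the
reworked helper after this change. The wrapper drv_attach_frag() and its
variable names are illustrative only; xsk_buff_add_frag() and
xsk_buff_free() are the real helpers from <net/xdp_sock_drv.h> touched by
this commit.

#include <net/xdp_sock_drv.h>

/* Illustrative sketch mirroring what the i40e/ice Rx loops now do.
 * Returns the (possibly new) head buffer, or NULL when the frag could
 * not be attached and the in-progress frame was dropped.
 */
static struct xdp_buff *drv_attach_frag(struct xdp_buff *first,
					struct xdp_buff *xdp)
{
	if (!first)
		return xdp;		/* first buffer becomes the head */

	/* xsk_buff_add_frag() now also fills the skb frag in @first via
	 * __xdp_buff_add_frag() and links the xskb on the pool's
	 * xskb_list; it returns false once no more frags fit.
	 */
	if (!xsk_buff_add_frag(first, xdp)) {
		xsk_buff_free(first);	/* frees the head and its frags */
		return NULL;
	}

	return first;
}

Note in the diff below that ice additionally skips zero-sized buffers
(the likely(size) check) before attaching the frag, while i40e attaches
unconditionally.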
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice_xsk.c
include/net/xdp_sock_drv.h

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef4def29daa53e37ee02e9c1d9523f..e28f1905a4a0f884db27bbe6f56d7217b565ef68 100644
@@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
        WARN_ON_ONCE(1);
 }
 
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
-                 struct xdp_buff *xdp, const unsigned int size)
-{
-       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
-       if (!xdp_buff_has_frags(first)) {
-               sinfo->nr_frags = 0;
-               sinfo->xdp_frags_size = 0;
-               xdp_buff_set_frags_flag(first);
-       }
-
-       if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-               xsk_buff_free(first);
-               return -ENOMEM;
-       }
-
-       __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start),
-                                  XDP_PACKET_HEADROOM, size);
-       sinfo->xdp_frags_size += size;
-       xsk_buff_add_frag(xdp);
-
-       return 0;
-}
-
 /**
  * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
  * @rx_ring: Rx ring
@@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 
                if (!first)
                        first = bi;
-               else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+               else if (!xsk_buff_add_frag(first, bi)) {
+                       xsk_buff_free(first);
                        break;
+               }
 
                if (++next_to_process == count)
                        next_to_process = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d6404ccebe2cc1ff5b16643d5ef6aaa2..8975d2971bc3778aad8898e334ce1d759618fd32 100644
@@ -801,35 +801,6 @@ out_failure:
        return result;
 }
 
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
-                struct xdp_buff *xdp, const unsigned int size)
-{
-       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
-       if (!size)
-               return 0;
-
-       if (!xdp_buff_has_frags(first)) {
-               sinfo->nr_frags = 0;
-               sinfo->xdp_frags_size = 0;
-               xdp_buff_set_frags_flag(first);
-       }
-
-       if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-               xsk_buff_free(first);
-               return -ENOMEM;
-       }
-
-       __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start),
-                                  XDP_PACKET_HEADROOM, size);
-       sinfo->xdp_frags_size += size;
-       xsk_buff_add_frag(xdp);
-
-       return 0;
-}
-
 /**
  * ice_clean_rx_irq_zc - consumes packets from the hardware ring
  * @rx_ring: AF_XDP Rx ring
@@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
 
                if (!first) {
                        first = xdp;
-               } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+               } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+                       xsk_buff_free(first);
                        break;
                }
 
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index f3175a5d28f79756731f6449ca1d4be3eb2ed6e0..86620c81896592e7c5cac0add0dcebac243e988f 100644
@@ -136,11 +136,21 @@ out:
        xp_free(xskb);
 }
 
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+                                    struct xdp_buff *xdp)
 {
-       struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+       const void *data = xdp->data;
+       struct xdp_buff_xsk *frag;
+
+       if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
+                                offset_in_page(data), xdp->data_end - data,
+                                xdp->frame_sz, false))
+               return false;
 
+       frag = container_of(xdp, struct xdp_buff_xsk, xdp);
        list_add_tail(&frag->list_node, &frag->pool->xskb_list);
+
+       return true;
 }
 
 static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
@@ -357,8 +367,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 {
 }
 
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+                                    struct xdp_buff *xdp)
 {
+       return false;
 }
 
 static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)