xdp: add generic xdp_build_skb_from_buff()
author    Alexander Lobakin <aleksander.lobakin@intel.com>
          Wed, 18 Dec 2024 17:44:31 +0000 (18:44 +0100)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 20 Dec 2024 03:51:14 +0000 (19:51 -0800)
The code that builds an skb from an &xdp_buff keeps getting duplicated
across drivers with almost no changes. Let's try to stop that by adding
a generic function (a usage sketch follows the tags below).

Unlike __xdp_build_skb_from_frame(), it always allocates the skbuff
head using napi_build_skb() and makes use of the available xdp_rxq
pointer to assign the Rx queue index. For a PP-backed buffer, it marks
the skb for recycling, as every page_pool user has by now been switched
to recycling skbs.

Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20241218174435.1445282-4-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
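
To illustrate how a driver would consume the new helper, here is a
minimal sketch. The mydrv_* names, the Rx queue structure and
mydrv_run_xdp() are hypothetical; only xdp_init_buff(),
xdp_prepare_buff(), xdp_build_skb_from_buff() and napi_gro_receive()
are real kernel APIs.

/* Hypothetical driver Rx path: after an XDP_PASS verdict, one call to
 * xdp_build_skb_from_buff() replaces the open-coded napi_build_skb() +
 * skb_reserve() + __skb_put() + skb_mark_for_recycle() +
 * eth_type_trans() sequence.
 */
static void mydrv_rx_one(struct mydrv_rx_queue *rq, void *hard_start,
			 u32 headroom, u32 len)
{
	struct xdp_buff xdp;
	struct sk_buff *skb;

	/* rq->xdp_rxq is the xdp_rxq_info registered for this queue;
	 * the helper reads the queue index and memory model from it.
	 */
	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, hard_start, headroom, len, true);

	if (mydrv_run_xdp(rq, &xdp) != XDP_PASS)
		return;

	skb = xdp_build_skb_from_buff(&xdp);
	if (unlikely(!skb))
		return;

	napi_gro_receive(&rq->napi, skb);
}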
include/net/xdp.h
net/core/xdp.c

diff --git a/include/net/xdp.h b/include/net/xdp.h
index 11139c210b498a9a69b0db1deb96d7ceac79873b..aa24fa78cbe6e8bdf60cd8d9cb3d4f9c8502f46c 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -336,6 +336,7 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
 void xdp_warn(const char *msg, const char *func, const int line);
 #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
 
+struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
                                           struct sk_buff *skb,
diff --git a/net/core/xdp.c b/net/core/xdp.c
index a66a4e036f53fd5ade48a2ae43cb1a127cc47adf..704203a15a18e7cf6a092e7fa2e74bd0969004f2 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -629,6 +629,61 @@ int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
 
+/**
+ * xdp_build_skb_from_buff - create an skb from &xdp_buff
+ * @xdp: &xdp_buff to convert to an skb
+ *
+ * Perform common operations to create a new skb to pass up the stack from
+ * &xdp_buff: allocate an skb head from the NAPI percpu cache, initialize
+ * skb data pointers and offsets, set the recycle bit if the buff is
+ * PP-backed, record the Rx queue index, set the protocol, and update
+ * the frags info.
+ *
+ * Return: new &sk_buff on success, %NULL on error.
+ */
+struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
+{
+       const struct xdp_rxq_info *rxq = xdp->rxq;
+       const struct skb_shared_info *sinfo;
+       struct sk_buff *skb;
+       u32 nr_frags = 0;
+       int metalen;
+
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
+
+       skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
+       if (unlikely(!skb))
+               return NULL;
+
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
+       __skb_put(skb, xdp->data_end - xdp->data);
+
+       metalen = xdp->data - xdp->data_meta;
+       if (metalen > 0)
+               skb_metadata_set(skb, metalen);
+
+       if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
+               skb_mark_for_recycle(skb);
+
+       skb_record_rx_queue(skb, rxq->queue_index);
+
+       if (unlikely(nr_frags)) {
+               u32 tsize;
+
+               tsize = sinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
+               xdp_update_skb_shared_info(skb, nr_frags,
+                                          sinfo->xdp_frags_size, tsize,
+                                          xdp_buff_is_frag_pfmemalloc(xdp));
+       }
+
+       skb->protocol = eth_type_trans(skb, rxq->dev);
+
+       return skb;
+}
+EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
+
 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
                                           struct sk_buff *skb,
                                           struct net_device *dev)
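
For contrast with the buff-based helper added here, the frame-based path
keeps allocating the skb head from the skbuff kmem cache rather than the
NAPI percpu cache, as the commit message notes. A minimal hypothetical
sketch (mydrv_frame_to_skb() is illustrative; xdp_convert_buff_to_frame()
and xdp_build_skb_from_frame() are the real APIs):

/* Frame-based counterpart: used once the buffer has been detached from
 * the Rx ring into an &xdp_frame, e.g. on the XDP_REDIRECT completion
 * side. xdp_build_skb_from_frame() funnels into
 * __xdp_build_skb_from_frame() above.
 */
static struct sk_buff *mydrv_frame_to_skb(struct xdp_buff *xdp,
					  struct net_device *dev)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return NULL;

	return xdp_build_skb_from_frame(xdpf, dev);
}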