xsk: make xsk_buff_pool responsible for clearing xdp_buff::flags
author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>
          Wed, 24 Jan 2024 19:15:53 +0000 (20:15 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 1 Feb 2024 00:19:04 +0000 (16:19 -0800)
[ Upstream commit f7f6aa8e24383fbb11ac55942e66da9660110f80 ]

XDP multi-buffer support introduced the XDP_FLAGS_HAS_FRAGS flag, which
drivers use to tell the data path whether an xdp_buff contains
fragments or not. The data path looks up this flag only on the first
buffer, the one occupying the linear part of the xdp_buff, so drivers
modify it only there. This is sufficient for SKB and XDP_DRV modes,
where the xdp_buff is usually allocated on the stack or resides within
the struct representing the driver's queue, and fragments are carried
via skb_frag_t structs. IOW, we are dealing with only one xdp_buff.
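
For context, the flag is tested and set through small inline helpers; a
minimal sketch, modeled on the accessors in include/net/xdp.h:

	/* Sketch of the flag accessors (simplified from include/net/xdp.h) */
	static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
	{
		return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
	}

	static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
	{
		xdp->flags |= XDP_FLAGS_HAS_FRAGS;
	}

	static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
	{
		xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
	}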

ZC mode, though, relies on a list of xdp_buff structs carried via
xsk_buff_pool::xskb_list, so the ZC data path has to make sure that
fragments do *not* have XDP_FLAGS_HAS_FRAGS set. Otherwise,
xsk_buff_free() could misbehave if it were executed against an xdp_buff
that carries a frag with the XDP_FLAGS_HAS_FRAGS flag set. Such a
scenario can take place when the supplied XDP program calls
bpf_xdp_adjust_tail() with a negative offset, which in turn releases
the tail fragment of a multi-buffer frame.
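
A hypothetical frags-aware XDP program that triggers this path could
look like the sketch below; the program name and the -512 delta are
illustrative only:

	/* Hypothetical XDP program: a negative delta to bpf_xdp_adjust_tail()
	 * shrinks the frame and can release the tail fragment of a
	 * multi-buffer frame. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp.frags")
	int xdp_shrink_tail(struct xdp_md *ctx)
	{
		if (bpf_xdp_adjust_tail(ctx, -512))
			return XDP_DROP;
		return XDP_PASS;
	}

	char LICENSE[] SEC("license") = "GPL";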

Calling xsk_buff_free() on a tail fragment with XDP_FLAGS_HAS_FRAGS set
would release all the nodes from xskb_list that the driver produced
before XDP program execution, which is not what is intended: only the
tail fragment should be deleted from xskb_list and then put onto
xsk_buff_pool::free_list. Such a multi-buffer frame will never make it
up to user space, so from the AF_XDP application's POV there is no
traffic running; however, since free_list keeps gaining new nodes, the
driver is still able to feed the HW Rx queue with recycled buffers. The
bottom line is that instead of traffic being redirected to user space,
it is continuously dropped.
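
The failure mode follows from the shape of the free path; a simplified
sketch of xsk_buff_free(), modeled on include/net/xdp_sock_drv.h as of
this fix:

	/* Simplified sketch of xsk_buff_free(): if the buffer being freed
	 * still has XDP_FLAGS_HAS_FRAGS set, the loop below walks the
	 * pool-wide xskb_list and frees every node on it, so a stale flag
	 * on a tail fragment frees buffers it does not own. */
	static inline void xsk_buff_free(struct xdp_buff *xdp)
	{
		struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
		struct list_head *xskb_list = &xskb->pool->xskb_list;
		struct xdp_buff_xsk *pos, *tmp;

		if (likely(!xdp_buff_has_frags(xdp)))
			goto out;

		list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
			list_del(&pos->xskb_list_node);
			xp_free(pos);
		}

		xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
	out:
		xp_free(xskb);
	}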

To fix this, let us clear the mentioned flag on the xsk_buff_pool side
during xdp_buff initialization, which is what should have been done
right from the start of XSK multi-buffer support.

Fixes: 1bbc04de607b ("ice: xsk: add RX multi-buffer support")
Fixes: 1c9ba9c14658 ("i40e: xsk: add RX multi-buffer support")
Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/r/20240124191602.566724-3-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice_xsk.c
include/net/xdp_sock_drv.h
net/xdp/xsk_buff_pool.c

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 7d991e4d9b896c67d5bff178b3e38c35a290b03f..b75e6b6d317cbbf4417cab2478c7fe2182f69793 100644
@@ -503,7 +503,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
                i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
                                          &rx_bytes, xdp_res, &failure);
-               first->flags = 0;
                next_to_clean = next_to_process;
                if (failure)
                        break;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2a3f0834e13917018ff67561301e559093033785..33f194c870bb2be9001e53f9fd9f7487c457b583 100644
@@ -897,7 +897,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 
                if (!first) {
                        first = xdp;
-                       xdp_buff_clear_frags_flag(first);
                } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
                        break;
                }
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 1f6fc8c7a84c6c68166f1c231e8afec5998ed228..7290eb721c079b5876e3d5fd78dec6afa45e317f 100644
@@ -152,6 +152,7 @@ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
        xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
        xdp->data_meta = xdp->data;
        xdp->data_end = xdp->data + size;
+       xdp->flags = 0;
 }
 
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 49cb9f9a09beedb8f029da2325ea1fad1d338e0c..b0a611677865d21dae9815f9b25a20569750e12f 100644
@@ -541,6 +541,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
+       xskb->xdp.flags = 0;
 
        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,