diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ce60ecd48a4dc88eed7582bc0701f7c72acc84f5..c0e0204b9630450534f0c32527d40ff02d8175cf 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -338,7 +338,6 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
 
        dma_map->netdev = netdev;
        dma_map->dev = dev;
-       dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
@@ -424,7 +423,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
 
        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
-       pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));
 
@@ -460,8 +458,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM;
                }
-               if (dma_need_sync(dev, dma))
-                       dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }
 
@@ -557,11 +553,9 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
        xskb->xdp.data_meta = xskb->xdp.data;
        xskb->xdp.flags = 0;
 
-       if (pool->dma_need_sync) {
-               dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
-                                                pool->frame_len,
-                                                DMA_BIDIRECTIONAL);
-       }
+       if (pool->dev)
+               xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);
+
        return &xskb->xdp;
 }
 EXPORT_SYMBOL(xp_alloc);
@@ -633,7 +627,7 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
 {
        u32 nb_entries1 = 0, nb_entries2;
 
-       if (unlikely(pool->dma_need_sync)) {
+       if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) {
                struct xdp_buff *buff;
 
                /* Slow path */
@@ -693,18 +687,3 @@ dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
                (addr & ~PAGE_MASK);
 }
 EXPORT_SYMBOL(xp_raw_get_dma);
-
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
-{
-       dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
-                                     xskb->pool->frame_len, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
-
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
-                                size_t size)
-{
-       dma_sync_single_range_for_device(pool->dev, dma, 0,
-                                        size, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_device_slow);