dma-direct: clean up the logic in __dma_direct_alloc_pages()
author     Petr Tesarik <ptesarik@suse.com>
           Thu, 10 Jul 2025 08:38:29 +0000 (10:38 +0200)
committer  Marek Szyprowski <m.szyprowski@samsung.com>
           Mon, 11 Aug 2025 09:28:04 +0000 (11:28 +0200)
Convert the goto-based retry loop into a while() loop. To allow this
simplification, return early when allocation from CMA succeeds. As a bonus,
the early return avoids a repeated dma_coherent_ok() check.

No functional change.
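
To illustrate the shape of the change, here is a minimal standalone sketch of
the goto-to-while conversion; it is not the kernel code, and the helpers
try_alloc(), coherent_ok(), release() and widen_zone() are hypothetical
stand-ins for alloc_pages_node(), dma_coherent_ok(), __free_pages() and the
step-wise widening of the GFP zone mask:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins, declared only so the sketch is well-formed. */
    void *try_alloc(void);       /* attempt one allocation             */
    bool coherent_ok(void *p);   /* does the buffer satisfy the mask?  */
    void release(void *p);       /* free a rejected buffer             */
    bool widen_zone(void);       /* relax constraints; false = give up */

    /* Before: goto-based loop, tracking failure in a NULL pointer. */
    void *alloc_before(void)
    {
        void *p = NULL;
    again:
        if (!p)
            p = try_alloc();
        if (p && !coherent_ok(p)) {
            release(p);
            p = NULL;
            if (widen_zone())
                goto again;
        }
        return p;
    }

    /* After: a while() loop; every rejected buffer is freed, and each
     * iteration either widens the zone constraints and retries, or
     * returns NULL straight away. */
    void *alloc_after(void)
    {
        void *p;

        while ((p = try_alloc()) && !coherent_ok(p)) {
            release(p);
            if (!widen_zone())
                return NULL;
        }
        return p;
    }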

Signed-off-by: Petr Tesarik <ptesarik@suse.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20250710083829.1853466-1-ptesarik@suse.com
kernel/dma/direct.c

index 24c359d9c8799fd7d71b34ad1e42ad3cf1f744c7..302e89580972af82b762fea101325a265b17121d 100644
@@ -120,7 +120,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, bool allow_highmem)
 {
        int node = dev_to_node(dev);
-       struct page *page = NULL;
+       struct page *page;
        u64 phys_limit;
 
        WARN_ON_ONCE(!PAGE_ALIGNED(size));
@@ -131,30 +131,25 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page) {
-               if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
-                   (!allow_highmem && PageHighMem(page))) {
-                       dma_free_contiguous(dev, page, size);
-                       page = NULL;
-               }
+               if (dma_coherent_ok(dev, page_to_phys(page), size) &&
+                   (allow_highmem || !PageHighMem(page)))
+                       return page;
+
+               dma_free_contiguous(dev, page, size);
        }
-again:
-       if (!page)
-               page = alloc_pages_node(node, gfp, get_order(size));
-       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+
+       while ((page = alloc_pages_node(node, gfp, get_order(size)))
+              && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, get_order(size));
-               page = NULL;
 
                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
-                   !(gfp & (GFP_DMA32 | GFP_DMA))) {
+                   !(gfp & (GFP_DMA32 | GFP_DMA)))
                        gfp |= GFP_DMA32;
-                       goto again;
-               }
-
-               if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+               else if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA))
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-                       goto again;
-               }
+               else
+                       return NULL;
        }
 
        return page;