dma/pool: Avoid allocating redundant pools
author    Robin Murphy <robin.murphy@arm.com>
          Mon, 12 Jan 2026 15:46:38 +0000 (15:46 +0000)
committer Marek Szyprowski <m.szyprowski@samsung.com>
          Wed, 14 Jan 2026 10:00:00 +0000 (11:00 +0100)
On smaller systems, e.g. embedded arm64, it is common for all memory
to end up in ZONE_DMA32 or even ZONE_DMA. In such cases it is redundant
to allocate a nominal pool for an empty higher zone, since that pool's
memory just ends up coming from a lower zone which should already have
its own pool anyway. We already have logic to skip allocating a
ZONE_DMA pool when that zone is empty, so generalise it to save memory
in the case of the other zones too.
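
The has_managed_zone() helper used below generalises the existing
has_managed_dma() check. Its actual definition comes from elsewhere in
the series and may differ; a minimal sketch, assuming it follows the
same for_each_online_pgdat()/managed_zone() pattern that
has_managed_dma() uses in mm/page_alloc.c:

        /*
         * Sketch only: returns true if any online node has managed
         * (buddy-allocator) pages in the given zone, mirroring the
         * pattern of has_managed_dma().
         */
        bool has_managed_zone(enum zone_type zt)
        {
                struct pglist_data *pgdat;

                for_each_online_pgdat(pgdat) {
                        struct zone *zone = &pgdat->node_zones[zt];

                        if (managed_zone(zone))
                                return true;
                }
                return false;
        }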

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/8ab8d8a620dee0109f33f5cb63d6bfeed35aac37.1768230104.git.robin.murphy@arm.com
kernel/dma/pool.c

index 2645cfb5718b67a2376bdd4bcee6e88926c252df..c5da29ad010c4db6a14879e39ab6fbb2e5cc539c 100644 (file)
@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
        return pool;
 }
 
+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32 false
+#endif
+
 static int __init dma_atomic_pool_init(void)
 {
        int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
 
-       atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+       /* All memory might be in the DMA zone(s) to begin with */
+       if (has_managed_zone(ZONE_NORMAL)) {
+               atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                                                    GFP_KERNEL);
-       if (!atomic_pool_kernel)
-               ret = -ENOMEM;
+               if (!atomic_pool_kernel)
+                       ret = -ENOMEM;
+       }
        if (has_managed_dma()) {
                atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA);
                if (!atomic_pool_dma)
                        ret = -ENOMEM;
        }
-       if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+       if (has_managed_dma32) {
                atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA32);
                if (!atomic_pool_dma32)
@@ -228,7 +237,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
                        return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
                if (gfp & GFP_DMA32)
                        return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
-               return atomic_pool_kernel;
+               return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
        }
        if (prev == atomic_pool_kernel)
                return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
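
The dma_guess_pool() change covers the newly possible case where
atomic_pool_kernel was never allocated: a plain GFP_KERNEL request must
then fall back to whichever pool does exist. A worked example, assuming
a hypothetical board whose memory all lies in ZONE_DMA32, so the kernel
pool was skipped at init and atomic_pool_kernel is NULL:

        /* all RAM in ZONE_DMA32: atomic_pool_kernel == NULL */
        pool = atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
        /* NULL ?: atomic_pool_dma32 ?: ... -> atomic_pool_dma32 */

The allocation is served from the DMA32 pool, which is where the memory
would have come from in any case.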