From: Robin Murphy
Date: Mon, 12 Jan 2026 15:46:38 +0000 (+0000)
Subject: dma/pool: Avoid allocating redundant pools
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c6ccd098807483762ccd726e1498bac5a71d0005;p=thirdparty%2Fkernel%2Flinux.git

dma/pool: Avoid allocating redundant pools

On smaller systems, e.g. embedded arm64, it is common for all memory to
end up in ZONE_DMA32 or even ZONE_DMA. In such cases it is redundant to
allocate a nominal pool for an empty higher zone that just ends up coming
from a lower zone that should already have its own pool anyway. We already
have logic to skip allocating a ZONE_DMA pool when that is empty, so
generalise that to save memory in the case of other zones too.

Signed-off-by: Robin Murphy
Tested-by: Vladimir Kondratiev
Reviewed-by: Baoquan He
Signed-off-by: Marek Szyprowski
Link: https://lore.kernel.org/r/8ab8d8a620dee0109f33f5cb63d6bfeed35aac37.1768230104.git.robin.murphy@arm.com
---

diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 2645cfb5718b..c5da29ad010c 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
 	return pool;
 }
 
+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32 false
+#endif
+
 static int __init dma_atomic_pool_init(void)
 {
 	int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
 
-	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+	/* All memory might be in the DMA zone(s) to begin with */
+	if (has_managed_zone(ZONE_NORMAL)) {
+		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
 						    GFP_KERNEL);
-	if (!atomic_pool_kernel)
-		ret = -ENOMEM;
+		if (!atomic_pool_kernel)
+			ret = -ENOMEM;
+	}
 	if (has_managed_dma()) {
 		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
 						GFP_KERNEL | GFP_DMA);
 		if (!atomic_pool_dma)
 			ret = -ENOMEM;
 	}
-	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (has_managed_dma32) {
 		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
 				GFP_KERNEL | GFP_DMA32);
 		if (!atomic_pool_dma32)
@@ -228,7 +237,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
 		if (gfp & GFP_DMA32)
 			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
-		return atomic_pool_kernel;
+		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
 	}
 	if (prev == atomic_pool_kernel)
 		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
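
For context: the has_managed_zone(ZONE_NORMAL) check (and the has_managed_dma32
macro built on it) relies on a has_managed_zone() helper that is not part of
this diff; it is presumably provided by a companion mm patch in the same
series. As a rough sketch only, modelled on the existing has_managed_dma() in
mm/page_alloc.c rather than taken from that companion patch, such a helper
could look like:

	/* Sketch: return true if any online node has managed pages in zone @zt */
	bool has_managed_zone(enum zone_type zt)
	{
		struct pglist_data *pgdat;

		for_each_online_pgdat(pgdat) {
			/* managed_zone() is true when zone_managed_pages() > 0 */
			if (managed_zone(&pgdat->node_zones[zt]))
				return true;
		}
		return false;
	}

With a helper along those lines, dma_atomic_pool_init() only allocates a pool
for a zone that actually contains managed memory, and the extended fallback
chain in dma_guess_pool() covers requests aimed at a zone whose pool was never
created.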