cma: move __cma_declare_contiguous_nid() before its usage
author		Mike Rapoport (Microsoft) <rppt@kernel.org>
		Thu, 3 Jul 2025 18:47:09 +0000 (21:47 +0300)
committer	Andrew Morton <akpm@linux-foundation.org>
		Sun, 13 Jul 2025 23:38:24 +0000 (16:38 -0700)
Patch series "cma: factor out allocation logic from
__cma_declare_contiguous_nid", v2.

We've discussed earlier that HIGHMEM-related logic is spread all over
__cma_declare_contiguous_nid().  These patches decouple it into helper
functions.

This patch (of 3):

Move __cma_declare_contiguous_nid() before its usage and kill the forward
declaration.

Link: https://lkml.kernel.org/r/20250703184711.3485940-1-rppt@kernel.org
Link: https://lkml.kernel.org/r/20250703184711.3485940-2-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Pratyush Yadav <ptyadav@amazon.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
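
[Editor's note, not part of the commit: the general pattern applied by this patch is that once a static function's definition is placed above its first caller in the file, the forward declaration becomes redundant and can be removed. A minimal sketch with hypothetical names foo()/bar():]

    /* Before: definition follows the caller, so a forward declaration is needed. */
    static int foo(int x);          /* forward declaration */

    static int bar(void)
    {
            return foo(1);
    }

    static int foo(int x)
    {
            return x * 2;
    }

    /* After: definition moved above its caller, forward declaration dropped. */
    static int foo(int x)
    {
            return x * 2;
    }

    static int bar(void)
    {
            return foo(1);
    }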
mm/cma.c

index c40d53298801640df9d02c565eafe0e3cd1feb39..19d0371abff83a128fa23f917f76270b572d7b1b 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned int cma_area_count;
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
-                       phys_addr_t size, phys_addr_t limit,
-                       phys_addr_t alignment, unsigned int order_per_bit,
-                       bool fixed, const char *name, struct cma **res_cma,
-                       int nid);
-
 phys_addr_t cma_get_base(const struct cma *cma)
 {
        WARN_ON_ONCE(cma->nranges != 1);
@@ -359,6 +353,150 @@ static void __init list_insert_sorted(
        }
 }
 
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
+                       phys_addr_t size, phys_addr_t limit,
+                       phys_addr_t alignment, unsigned int order_per_bit,
+                       bool fixed, const char *name, struct cma **res_cma,
+                       int nid)
+{
+       phys_addr_t memblock_end = memblock_end_of_DRAM();
+       phys_addr_t highmem_start, base = *basep;
+       int ret;
+
+       /*
+        * We can't use __pa(high_memory) directly, since high_memory
+        * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+        * complain. Find the boundary by adding one to the last valid
+        * address.
+        */
+       if (IS_ENABLED(CONFIG_HIGHMEM))
+               highmem_start = __pa(high_memory - 1) + 1;
+       else
+               highmem_start = memblock_end_of_DRAM();
+       pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+               __func__, &size, &base, &limit, &alignment);
+
+       if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+               pr_err("Not enough slots for CMA reserved regions!\n");
+               return -ENOSPC;
+       }
+
+       if (!size)
+               return -EINVAL;
+
+       if (alignment && !is_power_of_2(alignment))
+               return -EINVAL;
+
+       if (!IS_ENABLED(CONFIG_NUMA))
+               nid = NUMA_NO_NODE;
+
+       /* Sanitise input arguments. */
+       alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
+       if (fixed && base & (alignment - 1)) {
+               pr_err("Region at %pa must be aligned to %pa bytes\n",
+                       &base, &alignment);
+               return -EINVAL;
+       }
+       base = ALIGN(base, alignment);
+       size = ALIGN(size, alignment);
+       limit &= ~(alignment - 1);
+
+       if (!base)
+               fixed = false;
+
+       /* size should be aligned with order_per_bit */
+       if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
+               return -EINVAL;
+
+       /*
+        * If allocating at a fixed base the request region must not cross the
+        * low/high memory boundary.
+        */
+       if (fixed && base < highmem_start && base + size > highmem_start) {
+               pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+                       &base, &highmem_start);
+               return -EINVAL;
+       }
+
+       /*
+        * If the limit is unspecified or above the memblock end, its effective
+        * value will be the memblock end. Set it explicitly to simplify further
+        * checks.
+        */
+       if (limit == 0 || limit > memblock_end)
+               limit = memblock_end;
+
+       if (base + size > limit) {
+               pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+                       &size, &base, &limit);
+               return -EINVAL;
+       }
+
+       /* Reserve memory */
+       if (fixed) {
+               if (memblock_is_region_reserved(base, size) ||
+                   memblock_reserve(base, size) < 0) {
+                       return -EBUSY;
+               }
+       } else {
+               phys_addr_t addr = 0;
+
+               /*
+                * If there is enough memory, try a bottom-up allocation first.
+                * It will place the new cma area close to the start of the node
+                * and guarantee that the compaction is moving pages out of the
+                * cma area and not into it.
+                * Avoid using first 4GB to not interfere with constrained zones
+                * like DMA/DMA32.
+                */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+               if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
+                       memblock_set_bottom_up(true);
+                       addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
+                                                       limit, nid, true);
+                       memblock_set_bottom_up(false);
+               }
+#endif
+
+               /*
+                * All pages in the reserved area must come from the same zone.
+                * If the requested region crosses the low/high memory boundary,
+                * try allocating from high memory first and fall back to low
+                * memory in case of failure.
+                */
+               if (!addr && base < highmem_start && limit > highmem_start) {
+                       addr = memblock_alloc_range_nid(size, alignment,
+                                       highmem_start, limit, nid, true);
+                       limit = highmem_start;
+               }
+
+               if (!addr) {
+                       addr = memblock_alloc_range_nid(size, alignment, base,
+                                       limit, nid, true);
+                       if (!addr)
+                               return -ENOMEM;
+               }
+
+               /*
+                * kmemleak scans/reads tracked objects for pointers to other
+                * objects but this address isn't mapped and accessible
+                */
+               kmemleak_ignore_phys(addr);
+               base = addr;
+       }
+
+       ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
+       if (ret) {
+               memblock_phys_free(base, size);
+               return ret;
+       }
+
+       (*res_cma)->nid = nid;
+       *basep = base;
+
+       return 0;
+}
+
 /*
  * Create CMA areas with a total size of @total_size. A normal allocation
  * for one area is tried first. If that fails, the biggest memblock
@@ -593,150 +731,6 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
        return ret;
 }
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
-                       phys_addr_t size, phys_addr_t limit,
-                       phys_addr_t alignment, unsigned int order_per_bit,
-                       bool fixed, const char *name, struct cma **res_cma,
-                       int nid)
-{
-       phys_addr_t memblock_end = memblock_end_of_DRAM();
-       phys_addr_t highmem_start, base = *basep;
-       int ret;
-
-       /*
-        * We can't use __pa(high_memory) directly, since high_memory
-        * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
-        * complain. Find the boundary by adding one to the last valid
-        * address.
-        */
-       if (IS_ENABLED(CONFIG_HIGHMEM))
-               highmem_start = __pa(high_memory - 1) + 1;
-       else
-               highmem_start = memblock_end_of_DRAM();
-       pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
-               __func__, &size, &base, &limit, &alignment);
-
-       if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-               pr_err("Not enough slots for CMA reserved regions!\n");
-               return -ENOSPC;
-       }
-
-       if (!size)
-               return -EINVAL;
-
-       if (alignment && !is_power_of_2(alignment))
-               return -EINVAL;
-
-       if (!IS_ENABLED(CONFIG_NUMA))
-               nid = NUMA_NO_NODE;
-
-       /* Sanitise input arguments. */
-       alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
-       if (fixed && base & (alignment - 1)) {
-               pr_err("Region at %pa must be aligned to %pa bytes\n",
-                       &base, &alignment);
-               return -EINVAL;
-       }
-       base = ALIGN(base, alignment);
-       size = ALIGN(size, alignment);
-       limit &= ~(alignment - 1);
-
-       if (!base)
-               fixed = false;
-
-       /* size should be aligned with order_per_bit */
-       if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
-               return -EINVAL;
-
-       /*
-        * If allocating at a fixed base the request region must not cross the
-        * low/high memory boundary.
-        */
-       if (fixed && base < highmem_start && base + size > highmem_start) {
-               pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
-                       &base, &highmem_start);
-               return -EINVAL;
-       }
-
-       /*
-        * If the limit is unspecified or above the memblock end, its effective
-        * value will be the memblock end. Set it explicitly to simplify further
-        * checks.
-        */
-       if (limit == 0 || limit > memblock_end)
-               limit = memblock_end;
-
-       if (base + size > limit) {
-               pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
-                       &size, &base, &limit);
-               return -EINVAL;
-       }
-
-       /* Reserve memory */
-       if (fixed) {
-               if (memblock_is_region_reserved(base, size) ||
-                   memblock_reserve(base, size) < 0) {
-                       return -EBUSY;
-               }
-       } else {
-               phys_addr_t addr = 0;
-
-               /*
-                * If there is enough memory, try a bottom-up allocation first.
-                * It will place the new cma area close to the start of the node
-                * and guarantee that the compaction is moving pages out of the
-                * cma area and not into it.
-                * Avoid using first 4GB to not interfere with constrained zones
-                * like DMA/DMA32.
-                */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-               if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
-                       memblock_set_bottom_up(true);
-                       addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
-                                                       limit, nid, true);
-                       memblock_set_bottom_up(false);
-               }
-#endif
-
-               /*
-                * All pages in the reserved area must come from the same zone.
-                * If the requested region crosses the low/high memory boundary,
-                * try allocating from high memory first and fall back to low
-                * memory in case of failure.
-                */
-               if (!addr && base < highmem_start && limit > highmem_start) {
-                       addr = memblock_alloc_range_nid(size, alignment,
-                                       highmem_start, limit, nid, true);
-                       limit = highmem_start;
-               }
-
-               if (!addr) {
-                       addr = memblock_alloc_range_nid(size, alignment, base,
-                                       limit, nid, true);
-                       if (!addr)
-                               return -ENOMEM;
-               }
-
-               /*
-                * kmemleak scans/reads tracked objects for pointers to other
-                * objects but this address isn't mapped and accessible
-                */
-               kmemleak_ignore_phys(addr);
-               base = addr;
-       }
-
-       ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
-       if (ret) {
-               memblock_phys_free(base, size);
-               return ret;
-       }
-
-       (*res_cma)->nid = nid;
-       *basep = base;
-
-       return 0;
-}
-
 static void cma_debug_show_areas(struct cma *cma)
 {
        unsigned long next_zero_bit, next_set_bit, nr_zero;