git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/cma: simplify zone intersection check
author: Frank van der Linden <fvdl@google.com>
Fri, 28 Feb 2025 18:29:23 +0000 (18:29 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 05:06:30 +0000 (22:06 -0700)
cma_activate_area walks all pages in the area, checking their zone
individually to see if the area resides in more than one zone.

Make this a little more efficient by using the recently introduced
pfn_range_intersects_zones() function.  Store the NUMA node id (if any) in
the cma structure to facilitate this.

Link: https://lkml.kernel.org/r/20250228182928.2645936-23-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/cma.c
mm/cma.h

index 8dc46bfa38197c3b4e264d61e964bbab4d114cc6..61ad4fd2f62d5d11cc02062bfcc7f3a65700516b 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -103,7 +103,6 @@ static void __init cma_activate_area(struct cma *cma)
 {
        unsigned long pfn, base_pfn;
        int allocrange, r;
-       struct zone *zone;
        struct cma_memrange *cmr;
 
        for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
@@ -124,12 +123,8 @@ static void __init cma_activate_area(struct cma *cma)
                 * CMA resv range to be in the same zone.
                 */
                WARN_ON_ONCE(!pfn_valid(base_pfn));
-               zone = page_zone(pfn_to_page(base_pfn));
-               for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) {
-                       WARN_ON_ONCE(!pfn_valid(pfn));
-                       if (page_zone(pfn_to_page(pfn)) != zone)
-                               goto cleanup;
-               }
+               if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
+                       goto cleanup;
 
                for (pfn = base_pfn; pfn < base_pfn + cmr->count;
                     pfn += pageblock_nr_pages)
@@ -261,6 +256,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
        cma->ranges[0].base_pfn = PFN_DOWN(base);
        cma->ranges[0].count = cma->count;
        cma->nranges = 1;
+       cma->nid = NUMA_NO_NODE;
 
        *res_cma = cma;
 
@@ -497,6 +493,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
        }
 
        cma->nranges = nr;
+       cma->nid = nid;
        *res_cma = cma;
 
 out:
@@ -684,6 +681,8 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base,
        if (ret)
                memblock_phys_free(base, size);
 
+       (*res_cma)->nid = nid;
+
        return ret;
 }
 
index 5f39dd1aac916fae0f39152f66d11b96ababb8f8..ff79dba5508cb283903dbf6a834be654b4fa2ef3 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -50,6 +50,8 @@ struct cma {
        struct cma_kobject *cma_kobj;
 #endif
        bool reserve_pages_on_error;
+       /* NUMA node (NUMA_NO_NODE if unspecified) */
+       int nid;
 };
 
 extern struct cma cma_areas[MAX_CMA_AREAS];