mm: cma: kill cma_pages_valid()
author    Kefeng Wang <wangkefeng.wang@huawei.com>
          Fri, 9 Jan 2026 09:31:33 +0000 (17:31 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Tue, 27 Jan 2026 04:02:27 +0000 (20:02 -0800)
Kill cma_pages_valid(), which is only used in cma_release(), and clean up
the code duplication between the CMA pages validity check and the CMA
memrange lookup: both walk cma->ranges, so the check is now folded into
cma_release()'s single walk over the ranges.
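
For illustration, a minimal standalone C model of the merged walk: one pass
both finds the memrange containing pfn and checks that the whole
[pfn, pfn + count) block fits inside it. This is not kernel code; the struct
and function names below are invented for the example, and where the patch
hits VM_WARN_ON_ONCE() and falls through to the no-match path, the model
simply returns false, which gives the same observable result.

#include <stdbool.h>
#include <stdio.h>

struct memrange {
	unsigned long base_pfn;
	unsigned long count;
};

/* Single pass: locate the range containing pfn AND validate that
 * [pfn, pfn + count) fits entirely inside it. */
static bool find_and_validate(const struct memrange *ranges, int nranges,
			      unsigned long pfn, unsigned long count)
{
	for (int r = 0; r < nranges; r++) {
		unsigned long end_pfn = ranges[r].base_pfn + ranges[r].count;

		/* pfn falls inside this range: the whole block must fit. */
		if (pfn >= ranges[r].base_pfn && pfn < end_pfn)
			return pfn + count <= end_pfn;
	}
	return false;	/* no range matches the page range */
}

int main(void)
{
	struct memrange ranges[] = { { 0x1000, 512 }, { 0x4000, 1024 } };

	printf("%d\n", find_and_validate(ranges, 2, 0x1010, 16)); /* 1: fits        */
	printf("%d\n", find_and_validate(ranges, 2, 0x11f0, 64)); /* 0: crosses end */
	printf("%d\n", find_and_validate(ranges, 2, 0x3000, 1));  /* 0: no match    */
	return 0;
}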

Link: https://lkml.kernel.org/r/20260109093136.1491549-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Cc: Mark Brown <broonie@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/cma.h
mm/cma.c

index 62d9c1cf632652489ccd9e01bf1370f2b1f3c249..e5745d2aec5595398b73fab4226b6d9d80781dba 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -49,7 +49,6 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                        struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
                              bool no_warn);
-extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
index 813e6dc7b0954864c9ef8cf7adc6a2293241de47..fe3a9eaac4e567b804a0d2dd146b1f46fde498ff 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -942,36 +942,6 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
        return page ? page_folio(page) : NULL;
 }
 
-bool cma_pages_valid(struct cma *cma, const struct page *pages,
-                    unsigned long count)
-{
-       unsigned long pfn, end;
-       int r;
-       struct cma_memrange *cmr;
-       bool ret;
-
-       if (!cma || !pages || count > cma->count)
-               return false;
-
-       pfn = page_to_pfn(pages);
-       ret = false;
-
-       for (r = 0; r < cma->nranges; r++) {
-               cmr = &cma->ranges[r];
-               end = cmr->base_pfn + cmr->count;
-               if (pfn >= cmr->base_pfn && pfn < end) {
-                       ret = pfn + count <= end;
-                       break;
-               }
-       }
-
-       if (!ret)
-               pr_debug("%s(page %p, count %lu)\n",
-                               __func__, (void *)pages, count);
-
-       return ret;
-}
-
 /**
  * cma_release() - release allocated pages
  * @cma:   Contiguous memory region for which the allocation is performed.
@@ -991,23 +961,27 @@ bool cma_release(struct cma *cma, const struct page *pages,
 
        pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
-       if (!cma_pages_valid(cma, pages, count))
+       if (!cma || !pages || count > cma->count)
                return false;
 
        pfn = page_to_pfn(pages);
-       end_pfn = pfn + count;
 
        for (r = 0; r < cma->nranges; r++) {
                cmr = &cma->ranges[r];
-               if (pfn >= cmr->base_pfn &&
-                   pfn < (cmr->base_pfn + cmr->count)) {
-                       VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count);
-                       break;
+               end_pfn = cmr->base_pfn + cmr->count;
+               if (pfn >= cmr->base_pfn && pfn < end_pfn) {
+                       if (pfn + count <= end_pfn)
+                               break;
+
+                       VM_WARN_ON_ONCE(1);
                }
        }
 
-       if (r == cma->nranges)
+       if (r == cma->nranges) {
+               pr_debug("%s(page %p, count %lu, no cma range matches the page range)\n",
+                        __func__, (void *)pages, count);
                return false;
+       }
 
        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, cmr, pfn, count);
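
After this patch, cma_release() still returns false when the pages do not
belong to the region, so callers can keep using the return value as an
ownership test. A hedged sketch of that caller pattern, using only the
cma_release() signature from include/linux/cma.h above; example_free_pages()
itself is a hypothetical helper, not part of this patch:

static void example_free_pages(struct cma *cma, struct page *page,
			       unsigned int order)
{
	/*
	 * cma_release() returns false if [page, page + count) is not
	 * inside @cma, so fall back to the buddy allocator in that case.
	 */
	if (!cma_release(cma, page, 1UL << order))
		__free_pages(page, order);
}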