git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/pagemap: Correct cpages calculation for migrate_vma_setup
authorMatthew Brost <matthew.brost@intel.com>
Thu, 12 Mar 2026 19:20:13 +0000 (20:20 +0100)
committerMatthew Brost <matthew.brost@intel.com>
Sat, 14 Mar 2026 01:12:51 +0000 (18:12 -0700)
cpages returned from migrate_vma_setup represents the total number of
individual pages found, not the number of 4K pages. The math in
drm_pagemap_migrate_to_devmem for npages is based on the number of 4K
pages, so cpages != npages can fail even if the entire memory range is
found in migrate_vma_setup (e.g., when a single 2M page is found).
Add drm_pagemap_cpages, which converts cpages to the number of 4K pages
found.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: linux-mm@kvack.org
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Balbir Singh <balbirs@nvidia.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260312192126.2024853-4-francois.dugast@intel.com
drivers/gpu/drm/drm_pagemap.c

index 733a3857947c3e8fbec464b165283c2ab605a3e4..837b881883f952436b5093b545f930578309b84d 100644 (file)
@@ -452,6 +452,41 @@ out:
        return ret;
 }
 
+/**
+ * drm_pagemap_cpages() - Count collected pages
+ * @migrate_pfn: Array of migrate_pfn entries to account
+ * @npages: Number of entries in @migrate_pfn
+ *
+ * Compute the total number of minimum-sized pages represented by the
+ * collected entries in @migrate_pfn. The total is derived from the
+ * order encoded in each entry.
+ *
+ * Return: Total number of minimum-sized pages.
+ */
+static int drm_pagemap_cpages(unsigned long *migrate_pfn, unsigned long npages)
+{
+       unsigned long i, cpages = 0;
+
+       for (i = 0; i < npages;) {
+               struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+               struct folio *folio;
+               unsigned int order = 0;
+
+               if (page) {
+                       /* Collected entry backed by a page: account for the
+                        * whole folio it belongs to, via the folio's order.
+                        */
+                       folio = page_folio(page);
+                       order = folio_order(folio);
+                       cpages += NR_PAGES(order);
+               } else if (migrate_pfn[i] & MIGRATE_PFN_COMPOUND) {
+                       /* No struct page, but the entry is flagged compound:
+                        * count a PMD-order chunk. NOTE(review): assumes a
+                        * compound entry without a page is always PMD-sized —
+                        * confirm against migrate_vma_setup's collection.
+                        */
+                       order = HPAGE_PMD_ORDER;
+                       cpages += NR_PAGES(order);
+               }
+
+               /* Advance past all sub-entries covered by this entry; an
+                * uncollected entry (order 0) advances by a single slot.
+                */
+               i += NR_PAGES(order);
+       }
+
+       return cpages;
+}
+
 /**
  * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
  * @devmem_allocation: The device memory allocation to migrate to.
@@ -554,7 +589,8 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
                goto err_free;
        }
 
-       if (migrate.cpages != npages) {
+       if (migrate.cpages != npages &&
+           drm_pagemap_cpages(migrate.src, npages) != npages) {
                /*
                 * Some pages to migrate. But we want to migrate all or
                 * nothing. Raced or unknown device pages.