/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
+ * @order: Order of the folio being initialized (0 for a single page)
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_pagemap_get_devmem_page(struct page *page,
+					unsigned int order,
					struct drm_pagemap_zdd *zdd)
{
-	page->zone_device_data = drm_pagemap_zdd_get(zdd);
-	zone_device_page_init(page, page_pgmap(page), 0);
+	/*
+	 * Initialize the (possibly compound) folio for zone-device use
+	 * first, then publish the zdd reference on it. Use page_folio()
+	 * consistently rather than a raw (struct folio *) cast, which
+	 * would bypass the compound-head lookup.
+	 */
+	zone_device_folio_init(page_folio(page), zdd->dpagemap->pagemap,
+			       order);
+	folio_set_zone_device_data(page_folio(page), drm_pagemap_zdd_get(zdd));
}
/**
.end = end,
.pgmap_owner = pagemap->owner,
.flags = MIGRATE_VMA_SELECT_SYSTEM | MIGRATE_VMA_SELECT_DEVICE_COHERENT |
- MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE | MIGRATE_VMA_SELECT_COMPOUND,
};
unsigned long i, npages = npages_in_range(start, end);
unsigned long own_pages = 0, migrated_pages = 0;
own_pages = 0;
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
+ unsigned long j;
struct page *page = pfn_to_page(migrate.dst[i]);
struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
- cur.start = i;
+ unsigned int order = 0;
+ cur.start = i;
pages[i] = NULL;
if (src_page && is_device_private_page(src_page)) {
struct drm_pagemap_zdd *src_zdd =
!mdetails->can_migrate_same_pagemap) {
migrate.dst[i] = 0;
own_pages++;
- continue;
+ goto next;
}
if (mdetails->source_peer_migrates) {
cur.dpagemap = src_zdd->dpagemap;
pages[i] = page;
}
migrate.dst[i] = migrate_pfn(migrate.dst[i]);
- drm_pagemap_get_devmem_page(page, zdd);
+
+ if (migrate.src[i] & MIGRATE_PFN_COMPOUND) {
+ drm_WARN_ONCE(dpagemap->drm, src_page &&
+ folio_order(page_folio(src_page)) != HPAGE_PMD_ORDER,
+ "Unexpected folio order\n");
+
+ order = HPAGE_PMD_ORDER;
+ migrate.dst[i] |= MIGRATE_PFN_COMPOUND;
+
+ for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+ migrate.dst[i + j] = 0;
+ }
+
+ drm_pagemap_get_devmem_page(page, order, zdd);
/* If we switched the migrating drm_pagemap, migrate previous pages now */
err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
npages = i + 1;
goto err_finalize;
}
+
+next:
+ i += NR_PAGES(order);
}
+
cur.start = npages;
cur.ops = NULL; /* Force migration */
err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
+ if (order)
+ mpfn[i] |= MIGRATE_PFN_COMPOUND;
next:
if (page)
addr += page_size(page);
if (err)
goto err_finalize;
- for (i = 0; i < npages; ++i)
+ for (i = 0; i < npages;) {
+ unsigned int order = 0;
+
pages[i] = migrate_pfn_to_page(src[i]);
+ if (pages[i])
+ order = folio_order(page_folio(pages[i]));
+
+ i += NR_PAGES(order);
+ }
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
.vma = vas,
.pgmap_owner = page_pgmap(page)->owner,
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
- MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT |
+ MIGRATE_VMA_SELECT_COMPOUND,
.fault_page = page,
};
struct drm_pagemap_migrate_details mdetails = {};
if (err)
goto err_finalize;
- for (i = 0; i < npages; ++i)
+ for (i = 0; i < npages;) {
+ unsigned int order = 0;
+
pages[i] = migrate_pfn_to_page(migrate.src[i]);
+ if (pages[i])
+ order = folio_order(page_folio(pages[i]));
+
+ i += NR_PAGES(order);
+ }
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
return err ? VM_FAULT_SIGBUS : 0;
}
+/**
+ * drm_pagemap_folio_split() - dev_pagemap_ops folio_split() callback
+ * @orig_folio: The original folio being split
+ * @new_folio: A folio produced by the split, or NULL
+ *
+ * Copies the dev_pagemap pointer from @orig_folio to @new_folio and takes
+ * an additional reference on the shared zone device data, so that each
+ * folio resulting from the split holds its own zdd reference (each will be
+ * freed independently later).
+ */
+static void drm_pagemap_folio_split(struct folio *orig_folio, struct folio *new_folio)
+{
+	struct drm_pagemap_zdd *zdd;
+
+	/* NOTE(review): presumably the core split code may invoke this with
+	 * new_folio == NULL; nothing to propagate in that case — confirm. */
+	if (!new_folio)
+		return;
+
+	new_folio->pgmap = orig_folio->pgmap;
+	zdd = folio_zone_device_data(orig_folio);
+	folio_set_zone_device_data(new_folio, drm_pagemap_zdd_get(zdd));
+}
+
static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
	.folio_free = drm_pagemap_folio_free,
	.migrate_to_ram = drm_pagemap_migrate_to_ram,
+	/* Propagate pgmap + a zdd reference to folios created by a split. */
+	.folio_split = drm_pagemap_folio_split,
};
/**