From: Francois Dugast
Date: Tue, 5 Aug 2025 13:59:04 +0000 (+0200)
Subject: drm/pagemap: DMA map folios when possible
X-Git-Tag: v6.18-rc1~134^2~18^2~95
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=d755ff6063852cbd43c666726b69333d33d0d379;p=thirdparty%2Flinux.git

drm/pagemap: DMA map folios when possible

If the page is part of a folio, DMA map the whole folio at once instead
of mapping individual pages one after the other. For example, if 2MB
folios are used instead of 4KB pages, this reduces the number of DMA
mappings by a factor of 512.

The folio order (and consequently the size) is stored in struct
drm_pagemap_addr so that it is available at the time of unmapping.

v2:
- Initialize order variable (Matthew Brost)
- Set proto and dir for completeness (Matthew Brost)
- Do not populate drm_pagemap_addr, document it (Matthew Brost)
- Add and use macro NR_PAGES(order) (Matthew Brost)

Cc: Matthew Brost
Reviewed-by: Matthew Brost
Acked-by: Maarten Lankhorst
Link: https://lore.kernel.org/r/20250805140028.599361-4-francois.dugast@intel.com
Signed-off-by: Francois Dugast
---

diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index e5c135d793ed9..0312edb8d4a86 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -222,24 +222,32 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 {
 	unsigned long i;
 
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
 		dma_addr_t dma_addr;
+		struct folio *folio;
+		unsigned int order = 0;
 
 		if (!page)
-			continue;
+			goto next;
 
 		if (WARN_ON_ONCE(is_zone_device_page(page)))
 			return -EFAULT;
 
-		dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+		folio = page_folio(page);
+		order = folio_order(folio);
+
+		dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
 		if (dma_mapping_error(dev, dma_addr))
 			return -EFAULT;
 
 		pagemap_addr[i] =
 			drm_pagemap_addr_encode(dma_addr,
 						DRM_INTERCONNECT_SYSTEM,
-						0, dir);
+						order, dir);
+
+next:
+		i += NR_PAGES(order);
 	}
 
 	return 0;
@@ -263,11 +271,14 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
 {
 	unsigned long i;
 
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
-			continue;
+			goto next;
+
+		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
 
-		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE, dir);
+next:
+		i += NR_PAGES(pagemap_addr[i].order);
 	}
 }
 
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 1d5919a991398..f6e7e234c0892 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -6,6 +6,8 @@
 #include
 #include
 
+#define NR_PAGES(order) (1U << (order))
+
 struct drm_pagemap;
 struct drm_pagemap_zdd;
 struct device;
@@ -173,7 +175,9 @@ struct drm_pagemap_devmem_ops {
	 * @pagemap_addr: Pointer to array of DMA information (source)
	 * @npages: Number of pages to copy
	 *
-	 * Copy pages to device memory.
+	 * Copy pages to device memory. If the order of a @pagemap_addr entry
+	 * is greater than 0, the entry is populated but subsequent entries
+	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
@@ -187,7 +191,9 @@ struct drm_pagemap_devmem_ops {
	 * @pagemap_addr: Pointer to array of DMA information (destination)
	 * @npages: Number of pages to copy
	 *
-	 * Copy pages to system RAM.
+	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
+	 * is greater than 0, the entry is populated but subsequent entries
+	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
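
Note (illustration only, not part of the patch): the mapping loop above advances by
NR_PAGES(order), so only the head page of each folio is DMA mapped and recorded, and
the tail entries of @pagemap_addr are left unpopulated. Below is a minimal user-space
C sketch of that stride arithmetic; the names count_map_ops, per_page and one_folio
are made up for this example and do not exist in the kernel sources.

#include <stdio.h>

#define NR_PAGES(order) (1U << (order))

/*
 * Count how many map operations a loop stepping by NR_PAGES(order) performs.
 * Only the first entry of each folio carries its order; tail entries are
 * never read because the index jumps past them, mirroring the stride used
 * in drm_pagemap_migrate_map_pages().
 */
static unsigned long count_map_ops(const unsigned int *orders, unsigned long npages)
{
	unsigned long i, ops = 0;

	for (i = 0; i < npages;) {
		ops++;				/* one mapping per folio head */
		i += NR_PAGES(orders[i]);	/* skip the folio's tail pages */
	}
	return ops;
}

int main(void)
{
	unsigned int per_page[512] = { 0 };	/* 512 separate 4KB pages: all order 0 */
	unsigned int one_folio[512] = { 9 };	/* one 2MB folio: order 9 at entry 0 */

	printf("order-0 pages: %lu mappings\n", count_map_ops(per_page, 512));
	printf("order-9 folio: %lu mappings\n", count_map_ops(one_folio, 512));
	return 0;
}

Running it prints 512 mappings for the per-page case and a single mapping for the
order-9 folio, which is the factor-of-512 reduction mentioned in the commit message.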