}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
-					     struct page *page)
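+/*
+ * Try to map the PMD-sized, PMD-aligned block of pages containing @pfn
+ * at the faulting address with a single huge PMD entry; the caller
+ * falls back to a single-page mapping unless this returns
+ * VM_FAULT_NOPAGE.
+ */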
+static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
-	unsigned long pfn = page_to_pfn(page);
	unsigned long paddr = pfn << PAGE_SHIFT;
-	bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
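+	/*
+	 * A huge PMD entry can only be used when the faulting virtual
+	 * address and the physical address share the same offset within
+	 * a PMD-sized region.
+	 */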
+	bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
-	if (aligned &&
-	    pmd_none(*vmf->pmd) &&
-	    folio_test_pmd_mappable(page_folio(page))) {
+	if (aligned && pmd_none(*vmf->pmd)) {
+		/* Read-only mapping; split upon write fault */
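+		/* Align the pfn down to the start of its PMD-sized block */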
		pfn &= PMD_MASK >> PAGE_SHIFT;
		return vmf_insert_pfn_pmd(vmf, pfn, false);
	}
@@ ... @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
	struct page **pages = shmem->pages;
	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
	struct page *page;
+	struct folio *folio;
	unsigned long pfn;

	dma_resv_lock(obj->resv, NULL);
	page = pages[page_offset];
	if (drm_WARN_ON_ONCE(dev, !page))
		goto out;
-
-	ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
-	if (ret == VM_FAULT_NOPAGE)
-		goto out;
+	folio = page_folio(page);
	pfn = page_to_pfn(page);
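+	/*
+	 * Only folios of at least PMD size can back a huge PMD entry;
+	 * anything smaller takes the single-page path below.
+	 */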
-	ret = vmf_insert_pfn(vma, vmf->address, pfn);
- out:
+	if (folio_test_pmd_mappable(folio))
+		ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
+	else
+		ret = VM_FAULT_FALLBACK; /* no PMD attempt; take the PTE path below */
+
+	if (ret != VM_FAULT_NOPAGE)
+		ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+out:
	dma_resv_unlock(obj->resv);
	return ret;