}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
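
+/*
+ * Try to install a PMD-sized huge mapping for the faulting address. The huge
+ * mapping is only attempted when the backing folio is PMD-mappable, the PMD
+ * entry is still empty and the virtual and physical offsets within the PMD
+ * agree. Returns true if the PMD entry was installed, false if the caller
+ * should fall back to mapping a single PTE.
+ */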
+static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
+				       struct page *page)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+
+	if (aligned &&
+	    pmd_none(*vmf->pmd) &&
+	    folio_test_pmd_mappable(page_folio(page))) {
+		pfn &= PMD_MASK >> PAGE_SHIFT;
+		if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
+			return true;
+	}
+#endif
+
+	return false;
+}
+
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
-	struct page *page;
+	struct page **pages = shmem->pages;
	pgoff_t page_offset;
+	unsigned long pfn;

	/* Offset to faulty address in the VMA. */
	page_offset = vmf->pgoff - vma->vm_pgoff;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
-	} else {
-		page = shmem->pages[page_offset];
+		goto out;
+	}

-		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
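+	/* Try to map a whole PMD first; fall back to a single PTE on failure. */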
+	if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
+		ret = VM_FAULT_NOPAGE;
+		goto out;
	}

+	pfn = page_to_pfn(pages[page_offset]);
+	ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+out:
	dma_resv_unlock(shmem->base.resv);

	return ret;