git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/shmem_helper: Make sure PMD entries get the writeable upgrade
author: Boris Brezillon <boris.brezillon@collabora.com>
Fri, 20 Mar 2026 15:19:13 +0000 (16:19 +0100)
committer: Boris Brezillon <boris.brezillon@collabora.com>
Fri, 3 Apr 2026 08:11:04 +0000 (10:11 +0200)
Unlike PTEs which are automatically upgraded to writeable entries if
.pfn_mkwrite() returns 0, the PMD upgrades go through .huge_fault(),
and we currently pretend to have handled the make-writeable request
even though we only ever map things read-only. Make sure we pass the
proper "write" info to vmf_insert_pfn_pmd() in that case.

This also means we have to record the mkwrite event in the .huge_fault()
path now. Move the dirty tracking logic to a
drm_gem_shmem_record_mkwrite() helper so it can also be called from
drm_gem_shmem_pfn_mkwrite().

Note that this wasn't a problem before commit 28e3918179aa
("drm/gem-shmem: Track folio accessed/dirty status in mmap"), because
the pgprot were not lowered to read-only before this commit (see the
vma_wants_writenotify() in vma_set_page_prot()).

Fixes: 28e3918179aa ("drm/gem-shmem: Track folio accessed/dirty status in mmap")
Cc: Biju Das <biju.das.jz@bp.renesas.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
Reviewed-by: Loïc Molinari <loic.molinari@collabora.com>
Tested-by: Biju Das <biju.das.jz@bp.renesas.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Tested-by: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
Link: https://patch.msgid.link/20260320151914.586945-1-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
drivers/gpu/drm/drm_gem_shmem_helper.c

index 2062ca607833036f13ccde65dfc5738977d4fb4f..545933c7f71214e9d675225793d9437be397f06a 100644 (file)
@@ -554,6 +554,21 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
+static void drm_gem_shmem_record_mkwrite(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       loff_t num_pages = obj->size >> PAGE_SHIFT;
+       pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+       if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+               return;
+
+       file_update_time(vma->vm_file);
+       folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+}
+
 static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
                                 unsigned long pfn)
 {
@@ -566,8 +581,23 @@ static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
 
                if (aligned &&
                    folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
+                       vm_fault_t ret;
+
                        pfn &= PMD_MASK >> PAGE_SHIFT;
-                       return vmf_insert_pfn_pmd(vmf, pfn, false);
+
+                       /* Unlike PTEs which are automatically upgraded to
+                        * writeable entries, the PMD upgrades go through
+                        * .huge_fault(). Make sure we pass the "write" info
+                        * along in that case.
+                        * This also means we have to record the write fault
+                        * here, instead of in .pfn_mkwrite().
+                        */
+                       ret = vmf_insert_pfn_pmd(vmf, pfn,
+                                                vmf->flags & FAULT_FLAG_WRITE);
+                       if (ret == VM_FAULT_NOPAGE && (vmf->flags & FAULT_FLAG_WRITE))
+                               drm_gem_shmem_record_mkwrite(vmf);
+
+                       return ret;
                }
 #endif
        }
@@ -655,19 +685,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 
 static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
 {
-       struct vm_area_struct *vma = vmf->vma;
-       struct drm_gem_object *obj = vma->vm_private_data;
-       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-       loff_t num_pages = obj->size >> PAGE_SHIFT;
-       pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
-
-       if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
-               return VM_FAULT_SIGBUS;
-
-       file_update_time(vma->vm_file);
-
-       folio_mark_dirty(page_folio(shmem->pages[page_offset]));
-
+       drm_gem_shmem_record_mkwrite(vmf);
        return 0;
 }