drm/gem-shmem: Track folio accessed/dirty status in mmap
Author:     Thomas Zimmermann <tzimmermann@suse.de>
AuthorDate: Fri, 27 Feb 2026 11:42:10 +0000 (12:42 +0100)
Commit:     Thomas Zimmermann <tzimmermann@suse.de>
CommitDate: Wed, 11 Mar 2026 08:33:43 +0000 (09:33 +0100)
Invoke folio_mark_accessed() in mmap page faults to add the folio to
the memory manager's LRU list. Userspace invokes mmap to get memory
for software rendering, and compositors do the same when creating the
final on-screen image, so keeping the pages on the LRU makes sense.
This avoids paging out graphics buffers under memory pressure.
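
What follows is a minimal userspace sketch of that software-rendering
path, using the dumb-buffer ioctls. It is illustrative only: the device
node, the buffer dimensions, and the include path (often
-I/usr/include/libdrm) are assumptions, and error handling is reduced
to early returns.

  #include <fcntl.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #include <libdrm/drm.h> /* assumed include path for the DRM uapi headers */

  int main(void)
  {
          /* Device node is illustrative; pick a card that supports dumb buffers. */
          int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
          if (fd < 0)
                  return 1;

          struct drm_mode_create_dumb create = {
                  .width = 1024, .height = 768, .bpp = 32,
          };
          if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                  return 1;

          struct drm_mode_map_dumb map_req = { .handle = create.handle };
          if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map_req))
                  return 1;

          /* Faulting in these pages goes through drm_gem_shmem_fault(),
           * which now calls folio_mark_accessed() on each backing folio. */
          uint32_t *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, map_req.offset);
          if (fb == MAP_FAILED)
                  return 1;

          /* ... software rendering into fb ... */

          munmap(fb, create.size);
          close(fd);
          return 0;
  }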

In pfn_mkwrite, further invoke folio_mark_dirty() to mark the folio
for writeback in case the underlying file is paged out of system memory.
This rarely happens in practice, but without dirty tracking it would
corrupt the buffer content.
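
To illustrate the write side, continuing the sketch above (fb and
create are assumed from there): the first store to each clean page
takes a write fault, and for a shared VM_PFNMAP mapping the kernel
invokes the .pfn_mkwrite handler before making the PTE writable.

          /* First write to each clean page takes a write fault; the kernel
           * calls .pfn_mkwrite before making the PTE writable, which now
           * marks the backing folio dirty so a later shmem writeback does
           * not lose the rendered pixels. */
          for (size_t i = 0; i < create.size / sizeof(*fb); i++)
                  fb[i] = 0x00ff0000; /* XRGB8888: red */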

This has little effect on a system's hardware-accelerated rendering,
which uses mmap only for the initial setup of textures, meshes,
shaders, etc.

v4:
- test for VM_FAULT_NOPAGE before marking folio as accessed (Boris)
- test page-array bounds in mkwrite handler (Boris)
v3:
- rewrite for VM_PFNMAP
v2:
- adapt to changes in drm_gem_shmem_try_mmap_pmd()

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patch.msgid.link/20260227114509.165572-6-tzimmermann@suse.de
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index cefa50eaf7a4269bc618d562bfdb26f7ba6f61b3..1ab2bbd3860ca110cc80bad451579d87ad965e22 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -598,6 +598,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        if (ret != VM_FAULT_NOPAGE)
                ret = vmf_insert_pfn(vma, vmf->address, pfn);
 
+       if (ret == VM_FAULT_NOPAGE)
+               folio_mark_accessed(folio);
+
 out:
        dma_resv_unlock(obj->resv);
 
@@ -638,10 +641,29 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
        drm_gem_vm_close(vma);
 }
 
+static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       loff_t num_pages = obj->size >> PAGE_SHIFT;
+       pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+       if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+               return VM_FAULT_SIGBUS;
+
+       file_update_time(vma->vm_file);
+
+       folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+       return 0;
+}
+
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
        .fault = drm_gem_shmem_fault,
        .open = drm_gem_shmem_vm_open,
        .close = drm_gem_shmem_vm_close,
+       .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
 };
 EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);