drm/panfrost: Fix a page leak in panfrost_mmu_map_fault_addr() when THP is on
author     Boris Brezillon <boris.brezillon@collabora.com>
           Thu, 8 Jan 2026 12:33:25 +0000 (13:33 +0100)
committer  Adrián Larumbe <adrian.larumbe@collabora.com>
           Fri, 9 Jan 2026 16:29:30 +0000 (16:29 +0000)
drm_gem_put_pages(), which we rely on for returning BO pages to shmem,
assumes per-folio refcounting, not per-page. If we call
shmem_read_mapping_page() per page, we break this assumption and leak
pages every time a huge page is allocated.
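
A minimal sketch of the imbalance (illustrative only, not lifted from the
driver; the 512/511 figures assume 4K pages, where a 2M THP folio spans
NUM_FAULT_PAGES entries):

    /* Broken scheme: one shmem lookup per 4K page. Each call takes a
     * reference on the same huge folio, so the pages[] array ends up
     * holding 512 references to it.
     */
    for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++)
            pages[i] = shmem_read_mapping_page(mapping, i);

    /* drm_gem_put_pages() releases the array folio by folio, dropping a
     * single reference per folio, so 511 references are never returned
     * and the huge page never goes back to shmem.
     *
     * Folio-granular scheme (what this patch switches to): one lookup
     * per folio, then fan the folio's pages out into the array.
     */
    folio = shmem_read_folio(mapping, page_offset);
    for (i = 0; i < folio_nr_pages(folio); i++)
            pages[page_offset + i] = folio_page(folio, i);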

v2:
- Rework the for() loop logic to better match the folio-granular
  allocation scheme

Cc: Loïc Molinari <loic.molinari@collabora.com>
Fixes: c12e9fcb5a5a ("drm/panfrost: Introduce huge tmpfs mountpoint option")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patch.msgid.link/20260108123325.1088195-1-boris.brezillon@collabora.com
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
drivers/gpu/drm/panfrost/panfrost_mmu.c

index 8f3b7a7b6ad099242b2dcdb6f8fad4fd56ca9b68..50ff308493618a741c527d6664fc7255a16dd3be 100644 (file)
@@ -587,12 +587,12 @@ out:
 static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
 {
-       int ret, i;
+       int ret;
        struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        struct drm_gem_object *obj;
-       pgoff_t page_offset;
+       pgoff_t page_offset, nr_pages;
        struct sg_table *sgt;
        struct page **pages;
 
@@ -613,6 +613,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
        page_offset -= bomapping->mmnode.start;
+       nr_pages = bo->base.base.size >> PAGE_SHIFT;
 
        obj = &bo->base.base;
 
@@ -626,8 +627,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                        goto err_unlock;
                }
 
-               pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
-                                      sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+               pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
                        kvfree(bo->sgts);
                        bo->sgts = NULL;
@@ -649,20 +649,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        mapping = bo->base.base.filp->f_mapping;
        mapping_set_unevictable(mapping);
 
-       for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
-               /* Can happen if the last fault only partially filled this
-                * section of the pages array before failing. In that case
-                * we skip already filled pages.
-                */
-               if (pages[i])
-                       continue;
+       for (pgoff_t pg = page_offset; pg < page_offset + NUM_FAULT_PAGES;) {
+               bool already_owned = false;
+               struct folio *folio;
 
-               pages[i] = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(pages[i])) {
-                       ret = PTR_ERR(pages[i]);
-                       pages[i] = NULL;
+               folio = shmem_read_folio(mapping, pg);
+               if (IS_ERR(folio)) {
+                       ret = PTR_ERR(folio);
                        goto err_unlock;
                }
+
+               pg &= ~(folio_nr_pages(folio) - 1);
+               for (u32 i = 0; i < folio_nr_pages(folio) && pg < nr_pages; i++) {
+                       if (pages[pg])
+                               already_owned = true;
+
+                       pages[pg++] = folio_page(folio, i);
+               }
+
+               /* We always fill the page array at a folio granularity so
+                * there's no valid reason for a folio range to be partially
+                * populated.
+                */
+               if (drm_WARN_ON(&pfdev->base, already_owned))
+                       folio_put(folio);
        }
 
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,