vmalloc: fix accounting with i915
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 23 Dec 2024 20:07:29 +0000 (20:07 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 2 Jan 2025 09:30:53 +0000 (10:30 +0100)
[ Upstream commit a2e740e216f5bf49ccb83b6d490c72a340558a43 ]

If the caller of vmap() specifies VM_MAP_PUT_PAGES (currently only the
i915 driver), we will decrement nr_vmalloc_pages and MEMCG_VMALLOC in
vfree().  These counters are incremented by vmalloc() but not by vmap(), so
this will cause an underflow.  Check the VM_MAP_PUT_PAGES flag before
decrementing either counter.
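
For context, here is a minimal sketch of how a VM_MAP_PUT_PAGES user such as
i915 hands caller-allocated pages to vmap().  It is not taken from the i915
driver; the function name example_vmap_put_pages() and the variable nr are
illustrative only, but the vmap()/vfree() calls follow the documented API:

        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/slab.h>
        #include <linux/vmalloc.h>

        /*
         * Illustrative only: map caller-allocated pages and let vfree() put
         * them.  These pages never went through vmalloc(), so they were never
         * added to nr_vmalloc_pages or MEMCG_VMALLOC, and vfree() must not
         * subtract them.
         */
        static void *example_vmap_put_pages(unsigned int nr)
        {
                struct page **pages;
                void *vaddr;
                unsigned int i;

                /* vfree() will kvfree() this array, so use kvmalloc for it. */
                pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return NULL;

                for (i = 0; i < nr; i++) {
                        pages[i] = alloc_page(GFP_KERNEL);
                        if (!pages[i])
                                goto err;
                }

                /* Ownership of the pages and the array passes to the vmalloc core. */
                vaddr = vmap(pages, nr, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
                if (!vaddr)
                        goto err;
                return vaddr;

        err:
                while (i--)
                        __free_page(pages[i]);
                kvfree(pages);
                return NULL;
        }

A later vfree(vaddr) releases both the pages and the array; with this fix the
release path no longer decrements the vmalloc accounting counters for them.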

Link: https://lkml.kernel.org/r/20241211202538.168311-1-willy@infradead.org
Fixes: b944afc9d64d ("mm: add a VM_MAP_PUT_PAGES flag for vmap")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Balbir Singh <balbirs@nvidia.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a0b650f50faa32c5211329f2b6a65b942082d57a..7c6694514606e607a92c9b690a06fad083f397e3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2709,7 +2709,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+                       if (!(area->flags & VM_MAP_PUT_PAGES))
+                               mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
                        /*
                         * High-order allocs for huge vmallocs are split, so
                         * can be freed as an array of order-0 allocations
@@ -2717,7 +2718,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        __free_pages(page, 0);
                        cond_resched();
                }
-               atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
+               if (!(area->flags & VM_MAP_PUT_PAGES))
+                       atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
 
                kvfree(area->pages);
        }
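
For reference, the matching increments happen only on the vmalloc() allocation
path.  The fragment below is paraphrased from __vmalloc_area_node() and the
exact code differs between kernel versions, but it shows why an area created
by vmap(..., VM_MAP_PUT_PAGES, ...) has nothing to subtract at free time:

        /* Paraphrased from __vmalloc_area_node(); details vary by version. */
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
        if (gfp_mask & __GFP_ACCOUNT) {
                int i;

                /* Per-page memcg accounting, mirrored by the decrement in __vunmap(). */
                for (i = 0; i < area->nr_pages; i++)
                        mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
        }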