To separate slabs from folios, we need to convert from any page in a
slab to the slab directly, without going through a page-to-folio
conversion first.
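
For illustration, a sketch of the two conversion paths, assuming a
struct page pointer already in hand (page_folio() and folio_slab() are
the existing helpers):

	/* before: go through the folio */
	slab = folio_slab(page_folio(page));

	/* after: any page in the slab converts directly */
	slab = page_slab(page);
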
Up to this point, page_slab() has followed the example of the other
memdesc converters (page_folio(), page_ptdesc(), etc.) and simply cast
the pointer to the requested type, regardless of whether the page
actually belongs to that type.
That changes with this commit; we check that the page actually belongs
to a slab and return NULL if it does not. Other memdesc converters will
adopt this convention in future.
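
Callers can now use the return value to test whether a page belongs to
a slab at all. A sketch of the resulting calling convention (a
hypothetical caller; the kfence changes below are the real in-tree
user):

	struct slab *slab = page_slab(page);

	if (!slab)
		return;	/* not a slab page, e.g. a large kmalloc page */
	/* slab may be dereferenced from here on */
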
kfence was the only user of page_slab(), so adjust it to the new way
of working. It will need to be touched again when we separate slab
from page.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: kasan-dev@googlegroups.com
Link: https://patch.msgid.link/20251113000932.1589073-2-willy@infradead.org
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Tested-by: Marco Elver <elver@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
*/
PAGE_TYPE_OPS(Guard, guard, guard)
-FOLIO_TYPE_OPS(slab, slab)
-
-/**
- * PageSlab - Determine if the page belongs to the slab allocator
- * @page: The page to test.
- *
- * Context: Any context.
- * Return: True for slab pages, false for any other kind of page.
- */
-static inline bool PageSlab(const struct page *page)
-{
- return folio_test_slab(page_folio(page));
-}
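+/*
+ * Provides PageSlab(), __SetPageSlab() and __ClearPageSlab(), as well
+ * as the folio_test_slab() family via FOLIO_TYPE_OPS().
+ */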
+PAGE_TYPE_OPS(Slab, slab, slab)
#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
* enters __slab_free() slow-path.
*/
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
- struct slab *slab;
+ struct page *page;
if (!i || (i % 2))
continue;
- slab = page_slab(pfn_to_page(start_pfn + i));
- __folio_set_slab(slab_folio(slab));
+ page = pfn_to_page(start_pfn + i);
+ __SetPageSlab(page);
#ifdef CONFIG_MEMCG
+ struct slab *slab = page_slab(page);
slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
MEMCG_DATA_OBJEXTS;
#endif
reset_slab:
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
- struct slab *slab;
+ struct page *page;
if (!i || (i % 2))
continue;
- slab = page_slab(pfn_to_page(start_pfn + i));
+ page = pfn_to_page(start_pfn + i);
#ifdef CONFIG_MEMCG
+ struct slab *slab = page_slab(page);
slab->obj_exts = 0;
#endif
- __folio_clear_slab(slab_folio(slab));
+ __ClearPageSlab(page);
}
return addr;
struct slab *: (struct folio *)s))
/**
- * page_slab - Converts from first struct page to slab.
- * @p: The first (either head of compound or single) page of slab.
+ * page_slab - Converts from struct page to its slab.
+ * @page: A page which may or may not belong to a slab.
*
- * A temporary wrapper to convert struct page to struct slab in situations where
- * we know the page is the compound head, or single order-0 page.
- *
- * Long-term ideally everything would work with struct slab directly or go
- * through folio to struct slab.
- *
- * Return: The slab which contains this page
+ * Return: The slab which contains this page, or NULL if the page does
+ * not belong to a slab. Pages from a large kmalloc allocation are not
+ * slab pages, so they also return NULL.
*/
-#define page_slab(p) (_Generic((p), \
- const struct page *: (const struct slab *)(p), \
- struct page *: (struct slab *)(p)))
+static inline struct slab *page_slab(const struct page *page)
+{
+ unsigned long head;
+
+ head = READ_ONCE(page->compound_head);
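+	/* Bit 0 set means @page is a tail page; the rest is the head pointer. */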
+ if (head & 1)
+ page = (struct page *)(head - 1);
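+	/*
+	 * The top byte of page_type encodes the memdesc type; data_race()
+	 * marks this lockless read as intentional.
+	 */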
+ if (data_race(page->page_type >> 24) != PGTY_slab)
+ page = NULL;
+
+ return (struct slab *)page;
+}
/**
* slab_page - The first struct page allocated for a slab