* Serialized with zone lock.
*/
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
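+/*
+ * Generates PageLargeKmalloc() plus the __SetPage/__ClearPage variants
+ * so large kmalloc pages can be tested without going through a folio.
+ */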
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
/**
* PageHuge - Determine if the page belongs to hugetlbfs
	return s->size;
}
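+
+/*
+ * The order of a large kmalloc allocation is stored in the low byte
+ * of the second page's flags; these helpers are only meaningful for
+ * pages whose type is large_kmalloc (PageLargeKmalloc()).
+ */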
+static inline unsigned int large_kmalloc_order(const struct page *page)
+{
+	return page[1].flags.f & 0xff;
+}
+
+static inline size_t large_kmalloc_size(const struct page *page)
+{
+	return PAGE_SIZE << large_kmalloc_order(page);
+}
+
#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
*/
size_t __ksize(const void *object)
{
-	struct folio *folio;
+	const struct page *page;
+	const struct slab *slab;
	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;
-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
-	if (unlikely(!folio_test_slab(folio))) {
-		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
-			return 0;
-		if (WARN_ON(object != folio_address(folio)))
-			return 0;
-		return folio_size(folio);
-	}
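+	/* Large kmallocs are not slabs; their size comes from the stored order */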
+	if (unlikely(PageLargeKmalloc(page)))
+		return large_kmalloc_size(page);
+
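+	/* Anything else should be backed by a slab */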
+	slab = page_slab(page);
+	/* Remove this fallback once we're sure no callers pass non-slab pages */
+	if (WARN_ON(!slab))
+		return page_size(page);
#ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(slab->slab_cache, object);
#endif
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(slab->slab_cache);
}
gfp_t kmalloc_fix_flags(gfp_t flags)