 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
 {
-	unsigned int order = folio_order(folio);
+	unsigned int order = compound_order(page);
 
-	if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
-		dump_page(&folio->page, "Not a kmalloc allocation");
+	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+		dump_page(page, "Not a kmalloc allocation");
 		return;
 	}
 
 	kasan_kfree_large(object);
 	kmsan_kfree_large(object);
 
-	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
-	__folio_clear_large_kmalloc(folio);
-	free_frozen_pages(&folio->page, order);
+	__ClearPageLargeKmalloc(page);
+	free_frozen_pages(page, order);
 }
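The hunk above swaps each folio accessor for its page-based counterpart one-for-one. As a quick reference, the pairings used here (drawn only from this hunk, not an exhaustive mapping between the two APIs):

/*
 * folio-based (old)                         page-based (new)
 * ------------------------------------------------------------------------
 * folio_order(folio)                        compound_order(page)
 * folio_test_large_kmalloc(folio)           PageLargeKmalloc(page)
 * lruvec_stat_mod_folio(folio, idx, nr)     mod_lruvec_page_state(page, idx, nr)
 * __folio_clear_large_kmalloc(folio)        __ClearPageLargeKmalloc(page)
 * free_frozen_pages(&folio->page, order)    free_frozen_pages(page, order)
 */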
 		/*
 		 * rcu_head offset can be only less than page size so no need to
 		 * consider folio order
 		 */
 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-		free_large_kmalloc(folio, obj);
+		free_large_kmalloc(&folio->page, obj);
 		return;
 	}
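For intuition on the PAGE_ALIGN_DOWN() above: it recovers the start of the allocation only because, as the comment notes, the embedded rcu_head sits less than PAGE_SIZE into a block that itself starts on a page boundary. A minimal sketch with illustrative addresses (not taken from the patch):

/*
 * base = 0xffff888012340000   page-aligned start of the large kmalloc block
 * obj  = base + 0x120         rcu_head embedded at an offset < PAGE_SIZE
 *
 * PAGE_ALIGN_DOWN(obj) clears the low PAGE_SHIFT bits:
 *
 *   0xffff888012340120 & ~(PAGE_SIZE - 1) == 0xffff888012340000 == base
 *
 * so free_large_kmalloc() above is handed the whole allocation, not an
 * interior pointer.
 */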
 	folio = virt_to_folio(object);
 	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(folio, (void *)object);
+		free_large_kmalloc(&folio->page, (void *)object);
 		return;
 	}
 	if (!s) {
 		/* Handle kmalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(folio, object);
+			free_large_kmalloc(&folio->page, object);
 			df->slab = NULL;
 			return size;
 		}
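All three call sites follow the same shape: look up the folio, and bail out to free_large_kmalloc() with the head page when the memory is not slab-backed. A condensed, self-contained sketch of that caller-side pattern under the new signature (example_free() is a hypothetical name and the slab branch is elided; this is not the actual kfree() body):

static void example_free(const void *object)
{
	struct folio *folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		/* Not slab-backed, so this is a large kmalloc allocation. */
		free_large_kmalloc(&folio->page, (void *)object);
		return;
	}

	/* ... otherwise free the object through its kmem_cache ... */
}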