{
        unsigned long addr = (unsigned long)ptr;
        unsigned long offset;
-        struct folio *folio;
+        struct page *page;
+        struct slab *slab;

        if (is_kmap_addr(ptr)) {
                offset = offset_in_page(ptr);
                if (n > PAGE_SIZE - offset)
                        usercopy_abort("kmap", NULL, to_user, offset, n);
                return;
        }

        if (!virt_addr_valid(ptr))
                return;

-        folio = virt_to_folio(ptr);
-
-        if (folio_test_slab(folio)) {
+        page = virt_to_page(ptr);
+        slab = page_slab(page);
+        if (slab) {
                /* Check slab allocator for flags and size. */
-                __check_heap_object(ptr, n, folio_slab(folio), to_user);
-        } else if (folio_test_large(folio)) {
-                offset = ptr - folio_address(folio);
-                if (n > folio_size(folio) - offset)
+                __check_heap_object(ptr, n, slab, to_user);
+        } else if (PageCompound(page)) {
+                page = compound_head(page);
+                offset = ptr - page_address(page);
+                if (n > page_size(page) - offset)
                        usercopy_abort("page alloc", NULL, to_user, offset, n);
        }
+
+        /*
+         * We cannot check non-compound pages. They might be part of
+         * a large allocation, in which case crossing a page boundary
+         * is fine.
+         */
}
DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
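
For intuition, the bounds check in the "page alloc" branch above can be modelled in plain C: the offset is measured from the head page of the compound allocation, and the copy is rejected when it would run past the end of that allocation. The sketch below is a userspace illustration only; fake_compound, FAKE_PAGE_SIZE, and check_page_alloc_copy() are made-up stand-ins, not kernel APIs.

#include <stdio.h>

#define FAKE_PAGE_SIZE 4096UL

/* Illustrative stand-in for a compound (higher-order) page allocation. */
struct fake_compound {
        unsigned long base;     /* address of the head page */
        unsigned long nr_pages; /* number of pages in the allocation */
};

/*
 * Same shape as the "page alloc" branch: offset is measured from the head
 * page, and the copy is rejected when n exceeds the bytes remaining in the
 * allocation.
 */
static int check_page_alloc_copy(const struct fake_compound *fc,
                                 unsigned long ptr, unsigned long n)
{
        unsigned long size = fc->nr_pages * FAKE_PAGE_SIZE;
        unsigned long offset = ptr - fc->base;

        if (n > size - offset) {
                fprintf(stderr, "would abort: page alloc, offset=%lu n=%lu\n",
                        offset, n);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct fake_compound fc = { .base = 0x100000, .nr_pages = 4 };

        /* Starts one page in and stays inside the 4-page span: allowed. */
        printf("%d\n", check_page_alloc_copy(&fc, fc.base + FAKE_PAGE_SIZE,
                                             FAKE_PAGE_SIZE));
        /* Starts in the last page but spans two pages: rejected. */
        printf("%d\n", check_page_alloc_copy(&fc, fc.base + 3 * FAKE_PAGE_SIZE,
                                             2 * FAKE_PAGE_SIZE));
        return 0;
}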