return folio_test_head(folio);
}
-static __always_inline void set_compound_head(struct page *page, struct page *head)
+/*
+ * Link @tail to its compound @head: the "+ 1" sets bit 0 of
+ * ->compound_head, which marks the page as a tail page for
+ * compound_head() to decode.
+ *
+ * NOTE(review): @order is accepted but not used in this body —
+ * presumably reserved for a follow-up that encodes the order into
+ * ->compound_head; confirm against the rest of the series.
+ */
+static __always_inline void set_compound_head(struct page *tail,
+ const struct page *head, unsigned int order)
{
- WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
+ WRITE_ONCE(tail->compound_head, (unsigned long)head + 1);
}
static __always_inline void clear_compound_head(struct page *page)
/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
+ struct hstate *h,
unsigned long start_page_number,
unsigned long end_page_number)
{
struct page *page = folio_page(folio, start_page_number);
unsigned long head_pfn = folio_pfn(folio);
unsigned long pfn, end_pfn = head_pfn + end_page_number;
+ /* @h is only needed to derive the order for prep_compound_tail() */
+ unsigned int order = huge_page_order(h);
/*
* As we marked all tail pages with memblock_reserved_mark_noinit(),
*/
for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
__init_single_page(page, pfn, zone, nid);
- prep_compound_tail((struct page *)folio, pfn - head_pfn);
+ /* new signature: pass the tail page itself plus an explicit head */
+ prep_compound_tail(page, &folio->page, order);
set_page_count(page, 0);
}
}
__folio_set_head(folio);
ret = folio_ref_freeze(folio, 1);
VM_BUG_ON(!ret);
- hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
+ hugetlb_folio_init_tail_vmemmap(folio, h, 1, nr_pages);
prep_compound_head(&folio->page, huge_page_order(h));
}
* time as this is early in boot and there should
* be no contention.
*/
- hugetlb_folio_init_tail_vmemmap(folio,
+ hugetlb_folio_init_tail_vmemmap(folio, h,
HUGETLB_VMEMMAP_RESERVE_PAGES,
pages_per_huge_page(h));
}
INIT_LIST_HEAD(&folio->_deferred_list);
}
-static inline void prep_compound_tail(struct page *head, int tail_idx)
+/**
+ * prep_compound_tail - initialize one tail page of a compound page
+ * @tail: the tail struct page to initialize (previously computed from
+ *        @head + tail_idx; callers now pass it directly)
+ * @head: compound head page that @tail belongs to
+ * @order: order of the compound page, forwarded to set_compound_head()
+ *
+ * Sets TAIL_MAPPING poison in ->mapping, links @tail to @head, and
+ * clears ->private.
+ */
+static inline void prep_compound_tail(struct page *tail,
+ const struct page *head, unsigned int order)
{
- struct page *p = head + tail_idx;
-
- p->mapping = TAIL_MAPPING;
- set_compound_head(p, head);
- set_page_private(p, 0);
+ tail->mapping = TAIL_MAPPING;
+ set_compound_head(tail, head, order);
+ set_page_private(tail, 0);
}
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
struct page *page = pfn_to_page(pfn);
__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
- prep_compound_tail(head, pfn - head_pfn);
+ prep_compound_tail(page, head, order);
set_page_count(page, 0);
}
prep_compound_head(head, order);
__SetPageHead(page);
for (i = 1; i < nr_pages; i++)
- prep_compound_tail(page, i);
+ prep_compound_tail(page + i, page, order);
prep_compound_head(page, order);
}