#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
/* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- acr_flags_t alloc_flags, gfp_t gfp_mask);
-#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
-
-extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask);
-#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
-
+int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_frozen_range(...) \
+ alloc_hooks(alloc_contig_frozen_range_noprof(__VA_ARGS__))
+
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_range(...) \
+ alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
+ gfp_t gfp_mask, int nid, nodemask_t *nodemask);
+#define alloc_contig_frozen_pages(...) \
+ alloc_hooks(alloc_contig_frozen_pages_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...) \
+ alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
+void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#endif
-#ifdef CONFIG_CONTIG_ALLOC
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- struct page *page;
-
- if (WARN_ON(!order || !(gfp & __GFP_COMP)))
- return NULL;
-
- page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
-
- return page ? page_folio(page) : NULL;
-}
-#else
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- return NULL;
-}
-#endif
-/* This should be paired with folio_put() rather than free_contig_range(). */
-#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
-
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
#endif /* __LINUX_GFP_H */
return (ret < 0) ? ret : 0;
}
-static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
+static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask)
{
int order;
int i;
post_alloc_hook(page, order, gfp_mask);
- set_page_refcounted(page);
if (!order)
continue;
- split_page(page, order);
+ __split_page(page, order);
/* Add all subpages to the order-0 head, in sequence. */
list_del(&page->lru);
return 0;
}
+static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
+{
+ for (; nr_pages--; pfn++)
+ free_frozen_pages(pfn_to_page(pfn), 0);
+}
+
/**
- * alloc_contig_range() -- tries to allocate given range of pages
+ * alloc_contig_frozen_range() -- tries to allocate given range of frozen pages
* @start: start PFN to allocate
* @end: one-past-the-last PFN to allocate
* @alloc_flags: allocation information
* pageblocks in the range. Once isolated, the pageblocks should not
* be modified by others.
*
- * Return: zero on success or negative error code. On success all
- * pages which PFN is in [start, end) are allocated for the caller and
- * need to be freed with free_contig_range().
+ * All frozen pages whose PFN is in [start, end) are allocated for the
+ * caller, and they can be freed with free_contig_frozen_range();
+ * free_frozen_pages() can also be used to free compound frozen pages
+ * directly.
+ *
+ * Return: zero on success or negative error code.
*/
-int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- acr_flags_t alloc_flags, gfp_t gfp_mask)
+int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask)
{
const unsigned int order = ilog2(end - start);
unsigned long outer_start, outer_end;
}
if (!(gfp_mask & __GFP_COMP)) {
- split_free_pages(cc.freepages, gfp_mask);
+ split_free_frozen_pages(cc.freepages, gfp_mask);
/* Free head and tail (if any) */
if (start != outer_start)
- free_contig_range(outer_start, start - outer_start);
+ __free_contig_frozen_range(outer_start, start - outer_start);
if (end != outer_end)
- free_contig_range(end, outer_end - end);
+ __free_contig_frozen_range(end, outer_end - end);
} else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
struct page *head = pfn_to_page(start);
check_new_pages(head, order);
prep_new_page(head, order, gfp_mask, 0);
- set_page_refcounted(head);
} else {
ret = -EINVAL;
WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
undo_isolate_page_range(start, end);
return ret;
}
-EXPORT_SYMBOL(alloc_contig_range_noprof);
+EXPORT_SYMBOL(alloc_contig_frozen_range_noprof);
-static int __alloc_contig_pages(unsigned long start_pfn,
- unsigned long nr_pages, gfp_t gfp_mask)
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start: start PFN to allocate
+ * @end: one-past-the-last PFN to allocate
+ * @alloc_flags: allocation information
+ * @gfp_mask: GFP mask.
+ *
+ * This routine is a wrapper around alloc_contig_frozen_range(). It cannot
+ * be used to allocate compound pages; the refcount of each allocated page
+ * is set to one.
+ *
+ * All pages whose PFN is in [start, end) are allocated for the caller,
+ * and should be freed with free_contig_range() or by manually calling
+ * __free_page() on each allocated page.
+ *
+ * Return: zero on success or negative error code.
+ */
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask)
{
- unsigned long end_pfn = start_pfn + nr_pages;
+ int ret;
- return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE,
- gfp_mask);
+ if (WARN_ON(gfp_mask & __GFP_COMP))
+ return -EINVAL;
+
+ ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask);
+ if (!ret)
+ set_pages_refcounted(pfn_to_page(start), end - start);
+
+ return ret;
}
+EXPORT_SYMBOL(alloc_contig_range_noprof);
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
unsigned long nr_pages, bool skip_hugetlb,
}
/**
- * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
+ * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages
* @nr_pages: Number of contiguous pages to allocate
* @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
* action and reclaim modifiers are supported. Reclaim modifiers
* @nid: Target node
* @nodemask: Mask for other possible nodes
*
- * This routine is a wrapper around alloc_contig_range(). It scans over zones
- * on an applicable zonelist to find a contiguous pfn range which can then be
- * tried for allocation with alloc_contig_range(). This routine is intended
- * for allocation requests which can not be fulfilled with the buddy allocator.
+ * This routine is a wrapper around alloc_contig_frozen_range(). It scans over
+ * zones on an applicable zonelist to find a contiguous pfn range which can then
+ * be tried for allocation with alloc_contig_frozen_range(). This routine is
+ * intended for allocation requests which can not be fulfilled with the buddy
+ * allocator.
*
* The allocated memory is always aligned to a page boundary. If nr_pages is a
* power of two, then allocated range is also guaranteed to be aligned to same
* nr_pages (e.g. 1GB request would be aligned to 1GB).
*
- * Allocated pages can be freed with free_contig_range() or by manually calling
- * __free_page() on each allocated page.
+ * Allocated frozen pages need to be freed with free_contig_frozen_range(),
+ * or by manually calling free_frozen_pages() on each allocated
+ * non-compound frozen page; compound frozen pages can be freed with
+ * free_frozen_pages() directly.
*
- * Return: pointer to contiguous pages on success, or NULL if not successful.
+ * Return: pointer to contiguous frozen pages on success, or NULL if not successful.
*/
-struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask)
+struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
+ gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
unsigned long ret, pfn, flags;
struct zonelist *zonelist;
&skipped_hugetlb)) {
/*
* We release the zone lock here because
- * alloc_contig_range() will also lock the zone
- * at some point. If there's an allocation
- * spinning on this lock, it may win the race
- * and cause alloc_contig_range() to fail...
+ * alloc_contig_frozen_range() will also lock
+ * the zone at some point. If there's an
+ * allocation spinning on this lock, it may
+ * win the race and cause allocation to fail.
*/
spin_unlock_irqrestore(&zone->lock, flags);
- ret = __alloc_contig_pages(pfn, nr_pages,
+ ret = alloc_contig_frozen_range_noprof(pfn,
+ pfn + nr_pages,
+ ACR_FLAGS_NONE,
gfp_mask);
if (!ret)
return pfn_to_page(pfn);
}
return NULL;
}
+EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof);
-void free_contig_range(unsigned long pfn, unsigned long nr_pages)
+/**
+ * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
+ * @nr_pages: Number of contiguous pages to allocate
+ * @gfp_mask: GFP mask.
+ * @nid: Target node
+ * @nodemask: Mask for other possible nodes
+ *
+ * This routine is a wrapper around alloc_contig_frozen_pages(). It cannot
+ * be used to allocate compound pages; the refcount of each allocated page
+ * is set to one.
+ *
+ * Allocated pages can be freed with free_contig_range() or by manually
+ * calling __free_page() on each allocated page.
+ *
+ * Return: pointer to contiguous pages on success, or NULL if not successful.
+ */
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask)
{
- unsigned long count = 0;
- struct folio *folio = pfn_folio(pfn);
+ struct page *page;
- if (folio_test_large(folio)) {
- int expected = folio_nr_pages(folio);
+ if (WARN_ON(gfp_mask & __GFP_COMP))
+ return NULL;
- if (nr_pages == expected)
- folio_put(folio);
- else
- WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
- pfn, nr_pages, expected);
+ page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid,
+ nodemask);
+ if (page)
+ set_pages_refcounted(page, nr_pages);
+
+ return page;
+}
+EXPORT_SYMBOL(alloc_contig_pages_noprof);
+
+/**
+ * free_contig_frozen_range() -- free the contiguous range of frozen pages
+ * @pfn: start PFN to free
+ * @nr_pages: Number of contiguous frozen pages to free
+ *
+ * This can be used to free allocated compound or non-compound frozen pages.
+ */
+void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
+{
+ struct page *first_page = pfn_to_page(pfn);
+ const unsigned int order = ilog2(nr_pages);
+
+ if (WARN_ON_ONCE(first_page != compound_head(first_page)))
+ return;
+
+ if (PageHead(first_page)) {
+ WARN_ON_ONCE(order != compound_order(first_page));
+ free_frozen_pages(first_page, order);
return;
}
- for (; nr_pages--; pfn++) {
- struct page *page = pfn_to_page(pfn);
+ __free_contig_frozen_range(pfn, nr_pages);
+}
+EXPORT_SYMBOL(free_contig_frozen_range);
+
+/**
+ * free_contig_range() -- free the contiguous range of pages
+ * @pfn: start PFN to free
+ * @nr_pages: Number of contiguous pages to free
+ *
+ * This can only be used to free allocated non-compound pages.
+ */
+void free_contig_range(unsigned long pfn, unsigned long nr_pages)
+{
+ if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
+ return;
- count += page_count(page) != 1;
- __free_page(page);
- }
- WARN(count != 0, "%lu pages are still in use!\n", count);
+ for (; nr_pages--; pfn++)
+ __free_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL(free_contig_range);
#endif /* CONFIG_CONTIG_ALLOC */