if (atomic_read(&vm->config_changed))
return -EAGAIN;
- rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
+ rc = alloc_contig_range(pfn, pfn + nr_pages, ACR_FLAGS_NONE,
GFP_KERNEL);
if (rc == -ENOMEM)
/* whoops, out of memory */
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
}
#endif
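For illustration, a minimal caller sketch against the new signature (the helper name and PFN arguments are hypothetical; ordinary callers pass ACR_FLAGS_NONE where they previously passed MIGRATE_MOVABLE):

#include <linux/gfp.h>

/* Hypothetical helper: claim nr_pages physically contiguous pages at pfn. */
static int example_claim_range(unsigned long pfn, unsigned long nr_pages)
{
	int rc;

	/* Ordinary (non-CMA) request: ACR_FLAGS_NONE replaces MIGRATE_MOVABLE. */
	rc = alloc_contig_range(pfn, pfn + nr_pages, ACR_FLAGS_NONE, GFP_KERNEL);
	if (rc)
		return rc;

	/* ... use the range ..., then hand it back to the buddy allocator. */
	free_contig_range(pfn, nr_pages);
	return 0;
}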
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
+/*
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
+ */
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
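To connect the two new types, a minimal sketch (the helper name is hypothetical) of how an acr_flags_t maps onto a pb_isolate_mode, mirroring the logic alloc_contig_range_noprof() applies further below:

#include <linux/gfp.h>
#include <linux/page-isolation.h>

/* Hypothetical helper: pick the isolation mode for a contig allocation. */
static inline enum pb_isolate_mode acr_to_pb_mode(acr_flags_t alloc_flags)
{
	return (alloc_flags & ACR_FLAGS_CMA) ? PB_ISOLATE_MODE_CMA_ALLOC
					     : PB_ISOLATE_MODE_OTHER;
}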
void __meminit init_pageblock_migratetype(struct page *page,
enum migratetype migratetype,
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags);
+ enum pb_isolate_mode mode);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+ enum pb_isolate_mode mode);
#endif
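As a sketch of the mode-based isolation flow (hypothetical wrapper; it mirrors the memory-offline sequence further below, where callers isolate, test, and undo a zone-local PFN range):

#include <linux/page-isolation.h>

/* Hypothetical sketch: isolate a zone-local PFN range for offlining. */
static int example_isolate_for_offline(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       PB_ISOLATE_MODE_MEM_OFFLINE);
	if (ret)
		return ret;

	/* In this mode, HWPoison and PageOffline pages count as isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn,
				  PB_ISOLATE_MODE_MEM_OFFLINE);
	if (ret)
		undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}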
__entry->change_ownership)
);
+#ifdef CONFIG_CONTIG_ALLOC
TRACE_EVENT(mm_alloc_contig_migrate_range_info,
TP_PROTO(unsigned long start,
unsigned long nr_migrated,
unsigned long nr_reclaimed,
unsigned long nr_mapped,
- int migratetype),
+ acr_flags_t alloc_flags),
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
+ TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, alloc_flags),
TP_STRUCT__entry(
__field(unsigned long, start)
__field(unsigned long, nr_migrated)
__field(unsigned long, nr_reclaimed)
__field(unsigned long, nr_mapped)
- __field(int, migratetype)
+ __field(acr_flags_t, alloc_flags)
),
TP_fast_assign(
__entry->nr_migrated = nr_migrated;
__entry->nr_reclaimed = nr_reclaimed;
__entry->nr_mapped = nr_mapped;
- __entry->migratetype = migratetype;
+ __entry->alloc_flags = alloc_flags;
),
- TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
+ TP_printk("start=0x%lx end=0x%lx alloc_flags=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
__entry->start,
__entry->end,
- __entry->migratetype,
+ __entry->alloc_flags,
__entry->nr_migrated,
__entry->nr_reclaimed,
__entry->nr_mapped)
);
+#endif /* CONFIG_CONTIG_ALLOC */
TRACE_EVENT(mm_setup_per_zone_wmarks,
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma->alloc_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
+ ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
mutex_unlock(&cma->alloc_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
- MIGRATE_MOVABLE,
- MEMORY_OFFLINE | REPORT_FAILURE);
+ PB_ISOLATE_MODE_MEM_OFFLINE);
if (ret) {
reason = "failure to isolate range";
goto failed_removal_pcplists_disabled;
goto failed_removal_isolated;
}
- ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
+ ret = test_pages_isolated(start_pfn, end_pfn,
+ PB_ISOLATE_MODE_MEM_OFFLINE);
} while (ret);
/*
* [start, end) must belong to a single zone.
- * @migratetype: using migratetype to filter the type of migration in
+ * @alloc_flags: allocation flags (acr_flags_t) reported in
* trace_mm_alloc_contig_migrate_range_info.
*/
static int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end, int migratetype)
+ unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
putback_movable_pages(&cc->migratepages);
}
- trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
+ trace_mm_alloc_contig_migrate_range_info(start, end, alloc_flags,
total_migrated,
total_reclaimed,
total_mapped);
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
* @end: one-past-the-last PFN to allocate
- * @migratetype: migratetype of the underlying pageblocks (either
- * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
- * in range must have the same migratetype and it must
- * be either of the two.
+ * @alloc_flags: allocation flags. Pass ACR_FLAGS_CMA when allocating from
+ *		 CMA pageblocks, ACR_FLAGS_NONE otherwise.
* @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
* action and reclaim modifiers are supported. Reclaim modifiers
* control allocation behavior during compaction/migration/reclaim.
* need to be freed with free_contig_range().
*/
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask)
+ acr_flags_t alloc_flags, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
int ret = 0;
.alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
+ enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
+ PB_ISOLATE_MODE_CMA_ALLOC :
+ PB_ISOLATE_MODE_OTHER;
gfp_mask = current_gfp_context(gfp_mask);
if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(start, end, migratetype, 0);
+ ret = start_isolate_page_range(start, end, mode);
if (ret)
goto done;
* allocated. So, if we fall through be sure to clear ret so that
* -EBUSY is not accidentally used or returned to caller.
*/
- ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
+ ret = __alloc_contig_migrate_range(&cc, start, end, alloc_flags);
if (ret && ret != -EBUSY)
goto done;
outer_start = find_large_buddy(start);
/* Make sure the range is really isolated. */
- if (test_pages_isolated(outer_start, end, 0)) {
+ if (test_pages_isolated(outer_start, end, mode)) {
ret = -EBUSY;
goto done;
}
{
unsigned long end_pfn = start_pfn + nr_pages;
- return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
- gfp_mask);
+ return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_FLAGS_NONE,
+ gfp_mask);
}
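For completeness, a hedged sketch of the allocator-chosen-range path: alloc_contig_pages() picks a suitable range itself and, per the wrapper above, ends up passing ACR_FLAGS_NONE (the helper name is hypothetical):

#include <linux/gfp.h>
#include <linux/nodemask.h>

/* Hypothetical sketch: let the allocator pick a contiguous range of nr_pages. */
static struct page *example_alloc_contig(unsigned long nr_pages)
{
	/*
	 * Returns the first page of the range, or NULL. The caller releases
	 * the range with free_contig_range(page_to_pfn(page), nr_pages).
	 */
	return alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
}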
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
*
*/
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags)
+ enum pb_isolate_mode mode)
{
struct page *page = pfn_to_page(start_pfn);
struct zone *zone = page_zone(page);
* isolate CMA pageblocks even when they are not movable in fact
* so consider them movable here.
*/
- if (is_migrate_cma(migratetype))
+ if (mode == PB_ISOLATE_MODE_CMA_ALLOC)
return NULL;
return page;
* The HWPoisoned page may be not in buddy system, and
* page_count() is not 0.
*/
- if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
+ if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageHWPoison(page))
continue;
/*
* move these pages that still have a reference count > 0.
* (false negatives in this function only)
*/
- if ((flags & MEMORY_OFFLINE) && PageOffline(page))
+ if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
continue;
if (__PageMovable(page) || PageLRU(page))
* present in [start_pfn, end_pfn). The pageblock must intersect with
* [start_pfn, end_pfn).
*/
-static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
+static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
unsigned long start_pfn, unsigned long end_pfn)
{
struct zone *zone = page_zone(page);
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
- migratetype, isol_flags);
+ mode);
if (!unmovable) {
if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
}
spin_unlock_irqrestore(&zone->lock, flags);
- if (isol_flags & REPORT_FAILURE) {
+ if (mode == PB_ISOLATE_MODE_MEM_OFFLINE) {
/*
* printk() with zone->lock held will likely trigger a
* lockdep splat, so defer it here.
* isolate_single_pageblock() -- tries to isolate a pageblock that might be
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
- * @flags: isolation flags
+ * @mode: isolation mode
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
- * @migratetype: migrate type to set in error recovery.
*
* Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
* either. The function handles this by splitting the free page or migrating
* the in-use page then splitting the free page.
*/
-static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- bool isolate_before, bool skip_isolation, int migratetype)
+static int isolate_single_pageblock(unsigned long boundary_pfn,
+ enum pb_isolate_mode mode, bool isolate_before,
+ bool skip_isolation)
{
unsigned long start_pfn;
unsigned long isolate_pageblock;
zone->zone_start_pfn);
if (skip_isolation) {
- int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
-
- VM_BUG_ON(!is_migrate_isolate(mt));
+ VM_BUG_ON(!get_pageblock_isolate(pfn_to_page(isolate_pageblock)));
} else {
- ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
- flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock),
+ mode, isolate_pageblock,
+ isolate_pageblock + pageblock_nr_pages);
if (ret)
return ret;
* start_isolate_page_range() - mark page range MIGRATE_ISOLATE
* @start_pfn: The first PFN of the range to be isolated.
* @end_pfn: The last PFN of the range to be isolated.
- * @migratetype: Migrate type to set in error recovery.
- * @flags: The following flags are allowed (they can be combined in
- * a bit mask)
- * MEMORY_OFFLINE - isolate to offline (!allocate) memory
- * e.g., skip over PageHWPoison() pages
- * and PageOffline() pages.
- * REPORT_FAILURE - report details about the failure to
- * isolate the range
+ * @mode: isolation mode
*
 * Marking pageblocks in the range MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags)
+ enum pb_isolate_mode mode)
{
unsigned long pfn;
struct page *page;
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, false,
- skip_isolation, migratetype);
+ ret = isolate_single_pageblock(isolate_start, mode, false,
+ skip_isolation);
if (ret)
return ret;
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, true,
- skip_isolation, migratetype);
+ ret = isolate_single_pageblock(isolate_end, mode, true, skip_isolation);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start));
return ret;
pfn < isolate_end - pageblock_nr_pages;
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
- if (page && set_migratetype_isolate(page, migratetype, flags,
- start_pfn, end_pfn)) {
+ if (page && set_migratetype_isolate(page, mode, start_pfn,
+ end_pfn)) {
undo_isolate_page_range(isolate_start, pfn);
unset_migratetype_isolate(
pfn_to_page(isolate_end - pageblock_nr_pages));
*/
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
- int flags)
+ enum pb_isolate_mode mode)
{
struct page *page;
* simple way to verify that as VM_BUG_ON(), though.
*/
pfn += 1 << buddy_order(page);
- else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
+ else if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) &&
+ PageHWPoison(page))
/* A HWPoisoned page cannot be also PageBuddy */
pfn++;
- else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
- !page_count(page))
+ else if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) &&
+ PageOffline(page) && !page_count(page))
/*
* The responsible driver agreed to skip PageOffline()
* pages when offlining memory by dropping its
* test_pages_isolated - check if pageblocks in range are isolated
* @start_pfn: The first PFN of the isolated range
* @end_pfn: The first PFN *after* the isolated range
- * @isol_flags: Testing mode flags
+ * @mode: Testing mode
*
 * This tests if all pages in the specified range are free.
*
- * If %MEMORY_OFFLINE is specified in @flags, it will consider
+ * If %PB_ISOLATE_MODE_MEM_OFFLINE is specified in @mode, it will consider
* poisoned and offlined pages free as well.
*
* Caller must ensure the requested range doesn't span zones.
* Returns 0 if true, -EBUSY if one or more pages are in use.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags)
+ enum pb_isolate_mode mode)
{
unsigned long pfn, flags;
struct page *page;
/* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
- pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
+ pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, mode);
spin_unlock_irqrestore(&zone->lock, flags);
ret = pfn < end_pfn ? -EBUSY : 0;