// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>
/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)        b,
#define EMe(a, b)       b

const char *migrate_reason_names[MR_TYPES] = {
        MIGRATE_REASON
};
const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
        __def_pagetype_names,
        {0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};
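
/*
 * Note (added commentary): the tables above back the vsprintf "%pG"
 * format extensions used in this file (%pGp for page flags, %pGt for
 * the page type and %pGv for vma flags), which decode raw bitmasks
 * into the flag names defined in trace/events/mmflags.h.
 */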
static void __dump_page(struct page *page)
{
        struct folio *folio = page_folio(page);
        struct page *head = &folio->page;
        struct address_space *mapping;
        bool compound = PageCompound(page);
        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
         * state for debugging, it should be fine to accept a bit of
         * inaccuracy here due to racing.
         */
        bool page_cma = is_migrate_cma_page(page);
        int mapcount;
        char *type = "";
        if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
                /*
                 * Corrupt page, so we cannot call page_mapping. Instead, do a
                 * safe subset of the steps that page_mapping() does. Caution:
                 * this will be misleading for tail pages, PageSwapCache pages,
                 * and potentially other situations. (See the page_mapping()
                 * implementation for what's missing here.)
                 */
                unsigned long tmp = (unsigned long)page->mapping;

                if (tmp & PAGE_MAPPING_ANON)
                        mapping = NULL;
                else
                        mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
                head = page;
                folio = (struct folio *)page;
                compound = false;
        } else {
                mapping = page_mapping(page);
        }
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
        mapcount = PageSlab(head) ? 0 : page_mapcount(page);
98 pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
99 page
, page_ref_count(head
), mapcount
, mapping
,
100 page_to_pgoff(page
), page_to_pfn(page
));
102 pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
103 head
, compound_order(head
),
104 folio_entire_mapcount(folio
),
105 folio_nr_pages_mapped(folio
),
106 atomic_read(&folio
->_pincount
));
#ifdef CONFIG_MEMCG
        if (head->memcg_data)
                pr_warn("memcg:%lx\n", head->memcg_data);
#endif
        if (PageKsm(page))
                type = "ksm ";
        else if (PageAnon(page))
                type = "anon ";
        else if (mapping)
                dump_mapping(mapping);
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
121 pr_warn("%sflags: %pGp%s\n", type
, &head
->flags
,
122 page_cma
? " CMA" : "");
123 pr_warn("page_type: %pGt\n", &head
->page_type
);
125 print_hex_dump(KERN_WARNING
, "raw: ", DUMP_PREFIX_NONE
, 32,
126 sizeof(unsigned long), page
,
127 sizeof(struct page
), false);
129 print_hex_dump(KERN_WARNING
, "head: ", DUMP_PREFIX_NONE
, 32,
130 sizeof(unsigned long), head
,
131 sizeof(struct page
), false);
void dump_page(struct page *page, const char *reason)
{
        if (PagePoisoned(page))
                pr_warn("page:%p is uninitialized and poisoned", page);
        else
                __dump_page(page);
        if (reason)
                pr_warn("page dumped because: %s\n", reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
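
/*
 * Illustrative usage (not part of the original file): a caller that
 * detects an unexpected page state can dump it before recovering:
 *
 *      if (WARN_ON(!PageLocked(page)))
 *              dump_page(page, "expected locked page");
 *
 * The reason string shows up in the "page dumped because:" line.
 */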
#ifdef CONFIG_DEBUG_VM
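
/*
 * Note (added commentary): the dump_vma() and dump_mm() helpers below
 * back the VM_BUG_ON_VMA() and VM_BUG_ON_MM() macros in
 * <linux/mmdebug.h>, which dump the offending object before BUG()ing
 * when CONFIG_DEBUG_VM is set.
 */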
void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
#endif
                "mmap_base %lu mmap_legacy_base %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->task_size,
#ifdef CONFIG_MMU
                mm->get_unmapped_area,
#endif
                mm->mmap_base, mm->mmap_legacy_base,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags);
}
EXPORT_SYMBOL(dump_mm);
static bool page_init_poisoning __read_mostly = true;
static int __init setup_vm_debug(char *str)
{
        bool __page_init_poisoning = true;

        /*
         * Calling vm_debug with no arguments is equivalent to requesting
         * to enable all debugging options we can control.
         */
        if (*str++ != '=' || !*str)
                goto out;

        __page_init_poisoning = false;
        if (*str == '-')
                goto out;

        while (*str) {
                switch (tolower(*str)) {
                case 'p':
                        __page_init_poisoning = true;
                        break;
                default:
                        pr_err("vm_debug option '%c' unknown. skipped\n",
                               *str);
                }

                str++;
        }
out:
        if (page_init_poisoning && !__page_init_poisoning)
                pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

        page_init_poisoning = __page_init_poisoning;

        return 1;
}
__setup("vm_debug", setup_vm_debug);
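
/*
 * Usage sketch, derived from the parser above: booting with a plain
 * "vm_debug" or "vm_debug=p" keeps page struct poisoning enabled,
 * while "vm_debug=-" disables all options this knob controls.
 */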
void page_init_poison(struct page *page, size_t size)
{
        if (page_init_poisoning)
                memset(page, PAGE_POISON_PATTERN, size);
}
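
/*
 * Note (added commentary): poisoning fills the struct page range with
 * PAGE_POISON_PATTERN (all 0xff bytes), so a struct page used before
 * it is properly initialized trips the PagePoisoned() check reported
 * by dump_page() above.
 */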
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
        mas_dump(&vmi->mas);
        mt_dump(vmi->mas.tree, mt_dump_hex);
#endif  /* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif  /* CONFIG_DEBUG_VM */