// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b
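
/*
 * For example, EM(MR_COMPACTION, "compaction") expands to just
 * "compaction", so the MIGRATE_REASON list below becomes a
 * comma-separated array of reason strings indexed by enum migrate_reason.
 */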

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

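/*
 * Flag-name tables consumed by printk's %pGp/%pGt/%pGg/%pGv format
 * specifiers (see lib/vsprintf.c) to print flag words symbolically.
 */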
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

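/*
 * Dump the state of one page and of the folio that contains it.  @page
 * is a stable snapshot taken by the caller (__dump_page()); @folio
 * normally is too, except for folios too large to snapshot, in which
 * case the folio fields read here may race with concurrent updates.
 */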
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

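	/*
	 * _mapcount is overloaded: a value in the page-type range means
	 * the page has a type instead of mappings, so report 0; otherwise
	 * the stored count is biased by -1.
	 */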
	mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %pGt\n", &folio->page.page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), folio,
				2 * sizeof(struct page), false);
}

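/*
 * Take a snapshot of the page and of (the first two struct pages of)
 * its folio before dumping, so that a concurrently-changing page cannot
 * crash the dump.  If the snapshots turn out mutually inconsistent,
 * retry a few times, then fall back to dumping the page as order-0.
 */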
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

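/*
 * Entry point used by VM_BUG_ON_PAGE() and other debug checks.  Pages
 * still carrying the init poison pattern are reported as such rather
 * than being decoded any further.
 */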
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

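/*
 * Note the %px specifiers: these dumps deliberately print unhashed
 * kernel pointers, which is tolerable here since this code is only
 * built into CONFIG_DEBUG_VM kernels.
 */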
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

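/*
 * The format string and the argument list below must stay in sync:
 * every optional line is guarded by the same #ifdef in both the
 * string and the arguments.
 */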
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

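/*
 * Parsing of the "vm_debug" kernel command line option, e.g.:
 *   vm_debug      enable all debug options this file controls
 *   vm_debug=-    disable them all
 *   vm_debug=p    enable only struct page init poisoning
 * Unknown option characters are reported and skipped.
 */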
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

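/*
 * Fill the given range of struct pages with PAGE_POISON_PATTERN so that
 * later use of an uninitialized page can be caught (see PagePoisoned()
 * in dump_page() above); a no-op if poisoning was disabled via vm_debug.
 */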
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

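/*
 * Dump the maple tree state backing a VMA iterator; a no-op unless
 * CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */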
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif /* CONFIG_DEBUG_VM */