// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is a feature to manage memory for extended data per page.
 *
 * Until now, we have had to modify struct page itself to store extra data
 * per page. This requires rebuilding the kernel, which is a time-consuming
 * process, and sometimes a rebuild is impossible due to third-party module
 * dependencies. On top of that, enlarging struct page could cause unwanted
 * changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a certain place rather
 * than in struct page itself. This memory can be accessed through the
 * accessor functions provided by this code. During the boot process, it
 * checks whether allocating a huge chunk of memory is needed or not; if
 * not, it avoids allocating memory at all. With this advantage, we can
 * include this feature in the kernel by default and avoid rebuilds and
 * the problems related to them.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the user wants to avoid useless
 * memory allocation at boot time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether an extended memory
 * allocation is needed or not. Sometimes users want certain features
 * deactivated for a given boot, in which case the extra memory would be
 * unnecessary. To avoid allocating a huge chunk of memory in that case,
 * each client declares its need for extra memory through the need
 * callback. If any of the need callbacks returns true, someone needs
 * extra memory and the page extension core allocates it. If none of them
 * returns true, the memory isn't needed at all for this boot, the page
 * extension core can skip the allocation, and no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether extra memory
 * was requested through the size field in struct page_ext_operations. If
 * it is non-zero, extra space is allocated for each page_ext entry and
 * its placement is returned to the user through the offset field in
 * struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the
 * extra memory is allocated some time later than the memmap, i.e. the
 * lifetime of the memory for page extension isn't the same as that of
 * the memmap for struct page. Therefore, clients can't store extra data
 * until page extension is initialized, even though pages may already be
 * allocated and freely used. This could leave the extra per-page data in
 * an inadequate state, so clients can use this callback to initialize
 * that state correctly.
 */

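/*
 * A minimal sketch of a hypothetical client, assuming a feature that
 * keeps one unsigned long of state per page. The foo_* names are
 * illustrative only and not part of this file; a real client would also
 * add &foo_ops to the page_ext_ops[] array below.
 */
static bool foo_enabled = true;	/* e.g. set from a boot parameter */

static bool __init need_foo(void)
{
	/* Only request the extra memory when the feature is enabled. */
	return foo_enabled;
}

static struct page_ext_operations foo_ops = {
	.size = sizeof(unsigned long),	/* extra bytes per page_ext entry */
	.need = need_foo,
};
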
static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}
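
/*
 * Continuing the illustrative foo sketch above: an init callback (hooked
 * up via .init in struct page_ext_operations) runs only after every
 * page_ext table has been allocated, which is the earliest point at
 * which per-page state can safely be written.
 */
static bool foo_initialized;

static void __init init_foo(void)
{
	/* From here on, per-page foo state is safe to read and write. */
	foo_initialized = true;
}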

/* Size of one page_ext entry, including the clients' extra data. */
static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

/* Return the index-th entry of the page_ext table starting at base. */
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

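/*
 * Illustrative only, continuing the foo sketch: a client reaches its
 * private area by adding the offset that invoke_need_callbacks() stored
 * in its struct page_ext_operations.
 */
static inline unsigned long *foo_data(struct page_ext *page_ext)
{
	return (void *)page_ext + foo_ops.offset;
}
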
#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

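/*
 * Illustrative usage, continuing the foo sketch: lookup_page_ext() can
 * return NULL before the page_ext arrays exist, so callers must check
 * the result before touching their private data.
 */
static void foo_set(struct page *page, unsigned long val)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;
	*foo_data(page_ext) = val;
}
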
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * We need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range it examines can fall
	 * outside the exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	/* Fall back to vmalloc when contiguous pages are unavailable. */
	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}
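
/*
 * Illustrative arithmetic, showing why the biased pointer stored above
 * lets get_entry() index with a raw pfn: for a section starting at pfn
 * S, the stored value is base - S * get_entry_size(), so for any pfn P
 * within the section
 *
 *	get_entry(section->page_ext, P)
 *		= base - S * get_entry_size() + P * get_entry_size()
 *		= base + (P - S) * get_entry_size()
 *
 * which is exactly the (P - S)-th entry of that section's table.
 */
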
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap. We know some arches have
			 * a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 *
			 * Take DEFERRED_STRUCT_PAGE_INIT into account.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif