// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently than the
 * thrashing set, but hopefully are used less:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *   a  b | c d e f g h i |   J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap, we should also take NR_inactive_anon and
 * NR_active_anon into account; for page cache and anonymous pages
 * respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *                                 + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *                                 + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */
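
/*
 * Worked example of the refault distance math above (illustrative
 * only; the numbers are made up): assume a file folio on a node with
 * NR_inactive_file = 1000 and NR_active_file = 3000, and no free
 * swap. The folio is evicted while the eviction/activation counter
 * reads E = 50000 and refaults when the counter reads R = 52000. The
 * refault distance is R - E = 2000. Since 2000 <= NR_active_file, the
 * folio could have stayed resident had active pages not been eating
 * into the inactive list space, so the refault activates it (this is
 * the comparison performed in workingset_test_recent() below). Had it
 * refaulted at R = 57000 instead, the distance of 7000 would exceed
 * NR_active_file and the folio would start out on the inactive list
 * again.
 */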

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}
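
/*
 * Illustrative round-trip check of the shadow entry packing above.
 * This is a documentation sketch only: the helper is never called,
 * its name and the example values are made up, and it assumes
 * CONFIG_MEMCG so that memcg IDs actually occupy MEM_CGROUP_ID_SHIFT
 * bits of the entry.
 */
static void __maybe_unused shadow_pack_example(void)
{
	int memcgid;
	pg_data_t *pgdat;
	unsigned long eviction;
	bool workingset;
	void *shadow;

	/* Arbitrary example: memcg ID 5, node 0, counter 12345, workingset. */
	shadow = pack_shadow(5, NODE_DATA(0), 12345, true);
	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	/*
	 * The fields round-trip as long as the eviction counter fits
	 * into BITS_PER_LONG - EVICTION_SHIFT bits; larger values are
	 * truncated by EVICTION_MASK in pack_shadow().
	 */
	WARN_ON(memcgid != 5 || pgdat != NODE_DATA(0) ||
		eviction != 12345 || !workingset);
}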

#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	bool workingset = folio_test_workingset(folio);
	int tier = lru_tier_from_refs(refs, workingset);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
}

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long max_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);
	max_seq &= EVICTION_MASK >> LRU_REFS_WIDTH;

	return abs_diff(max_seq, *token >> LRU_REFS_WIDTH) < MAX_NR_GENS;
}
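
/*
 * Illustrative example of the recency test above (numbers made up):
 * a folio evicted while min_seq[type] == 20 carries 20 in the upper
 * bits of its shadow token, with its reference count (minus one) in
 * the low LRU_REFS_WIDTH bits. If it refaults while max_seq == 22,
 * abs_diff(22, 20) = 2 is within MAX_NR_GENS (four generations in
 * current kernels), so the refault is considered recent and credited
 * to the tier encoded in the token. If max_seq has meanwhile advanced
 * to 27, the entry is older than MAX_NR_GENS generations and the
 * refault is not considered recent.
 */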

static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;
	tier = lru_tier_from_refs(refs, workingset);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);

	/* see folio_add_lru() where folio_set_active() will be called */
	if (lru_gen_in_fault())
		mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	if (workingset) {
		folio_set_workingset(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	} else
		set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
			    bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	if (lru_gen_enabled()) {
		bool recent;

		rcu_read_lock();
		recent = lru_gen_test_recent(shadow, &eviction_lruvec, &eviction, workingset);
		rcu_read_unlock();
		return recent;
	}

	rcu_read_lock();
	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_tryget(eviction_memcg))
		eviction_memcg = NULL;
	rcu_read_unlock();

	if (!mem_cgroup_disabled() && !eviction_memcg)
		return false;
	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in an RCU
	 * read section (e.g. in cachestat) - these callers need to skip
	 * flushing stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}
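
/*
 * Worked example of the comparison above (illustrative only, numbers
 * made up): with NR_active_file = 4000, NR_inactive_file = 1000,
 * NR_active_anon = 2000, NR_inactive_anon = 500 and free swap
 * available, a refaulting file folio competes against
 * 4000 + 2000 + 500 = 6500 slots, so a refault distance of 5000 is
 * deemed recent while 7000 is not. A refaulting anon folio competes
 * against 4000 + 1000 + 2000 = 7000 slots instead. Without free swap,
 * the anon terms are dropped and only the file lists are considered.
 */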

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 */
	if (mem_cgroup_disabled() || folio_memcg_charged(folio))
		workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	lockdep_assert_held(&node->array->xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}
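
/*
 * For example (illustrative): a node with five occupied slots, all of
 * them shadow entries, has node->count == node->nr_values == 5 and is
 * linked onto the shadow_nodes list. As soon as a real page is stored
 * in one of its slots, nr_values drops below count and the node is
 * unlinked again; a node that becomes completely empty is likewise
 * unlinked before it is freed.
 */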

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}
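
/*
 * Worked example of the limit above (illustrative): with 64 slots per
 * xa_node (XA_CHUNK_SHIFT == 6), max_nodes is pages >> 3, i.e. one
 * node per eight resident pages. On a node with 4GiB of 4KiB pages
 * (~1M pages) that allows ~128k shadow nodes; at roughly seven
 * xa_nodes per page, those occupy about 18k pages, or ~1.8% of that
 * memory, matching the estimate in the comment above.
 */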

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  void *arg) __must_hold(lru->lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * &lru->lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the &lru->lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(&lru->lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(&lru->lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(&lru->lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = list_lru_init_memcg_key(&shadow_nodes, workingset_shadow_shrinker,
				      &shadow_nodes_key);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);
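
/*
 * Worked example of the bucket_order calculation above (illustrative;
 * the config values are assumptions): on a 32-bit kernel with
 * NODES_SHIFT == 2 and MEM_CGROUP_ID_SHIFT == 16, EVICTION_SHIFT is
 * 1 + 1 + 2 + 16 = 20, leaving timestamp_bits = 12. With 4GiB of RAM
 * (totalram_pages ~= 2^20), max_order is 20, so bucket_order becomes
 * 20 - 12 = 8 and evictions are grouped into buckets of 2^8 aging
 * events. On typical 64-bit configurations the timestamp bits are
 * plentiful and bucket_order usually stays 0.
 */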