// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap, we should also consider NR_inactive_anon and
 * NR_active_anon, so for page cache and anonymous pages respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *   + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *   + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

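/*
 * Illustrative sketch (standalone userspace C, not kernel code): the
 * activation rule derived above, with hypothetical numbers. The kernel
 * reads these quantities from per-lruvec counters; the names below
 * exist only for this example.
 */
static int ex_would_activate(unsigned long R, unsigned long E,
			     unsigned long nr_eligible)
{
	/* R - E is the refault distance; compare workingset_test_recent() */
	return (R - E) <= nr_eligible;
}

/*
 * With E == 1000 at eviction, R == 1400 at refault and 500 eligible
 * page slots, the distance of 400 fits and the refault would be
 * activated; with R == 1600, the distance of 600 would not.
 */
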
#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

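/*
 * Illustrative sketch (not kernel code): the shadow-entry bit budget
 * under one hypothetical 64-bit configuration. NODES_SHIFT is config
 * dependent and assumed here; one bit is always lost to the xarray
 * value tag (BITS_PER_XA_VALUE == BITS_PER_LONG - 1).
 */
enum {
	ex_xa_tag_bits	   = 1,		/* BITS_PER_LONG - BITS_PER_XA_VALUE */
	ex_workingset_bits = 1,		/* WORKINGSET_SHIFT */
	ex_node_bits	   = 10,	/* hypothetical NODES_SHIFT */
	ex_memcg_bits	   = 16,	/* MEM_CGROUP_ID_SHIFT */
	ex_eviction_shift  = ex_xa_tag_bits + ex_workingset_bits +
			     ex_node_bits + ex_memcg_bits,	/* 28 */
	ex_timestamp_bits  = 64 - ex_eviction_shift,		/* 36 */
};
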
/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}

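/*
 * Illustrative sketch (standalone userspace C, not kernel code): a
 * round-trip test of the pack/unpack layout above, with the kernel's
 * shift macros replaced by hypothetical constants.
 */
#include <assert.h>
#include <stdbool.h>

#define EX_MEM_CGROUP_ID_SHIFT	16
#define EX_NODES_SHIFT		10
#define EX_WORKINGSET_SHIFT	1

static unsigned long ex_pack(int memcgid, int nid, unsigned long eviction,
			     bool workingset)
{
	unsigned long e = eviction;

	e = (e << EX_MEM_CGROUP_ID_SHIFT) | memcgid;
	e = (e << EX_NODES_SHIFT) | nid;
	e = (e << EX_WORKINGSET_SHIFT) | workingset;
	return e;
}

static void ex_unpack(unsigned long e, int *memcgid, int *nid,
		      unsigned long *eviction, bool *workingset)
{
	/* unpack in the reverse order of packing */
	*workingset = e & ((1UL << EX_WORKINGSET_SHIFT) - 1);
	e >>= EX_WORKINGSET_SHIFT;
	*nid = e & ((1UL << EX_NODES_SHIFT) - 1);
	e >>= EX_NODES_SHIFT;
	*memcgid = e & ((1UL << EX_MEM_CGROUP_ID_SHIFT) - 1);
	e >>= EX_MEM_CGROUP_ID_SHIFT;
	*eviction = e;
}

static void ex_roundtrip(void)
{
	int memcgid, nid;
	unsigned long eviction;
	bool ws;

	ex_unpack(ex_pack(42, 3, 123456, true), &memcgid, &nid, &eviction, &ws);
	assert(memcgid == 42 && nid == 3 && eviction == 123456 && ws);
}
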
#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	bool workingset = folio_test_workingset(folio);
	int tier = lru_tier_from_refs(refs, workingset);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
}

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long max_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);
	max_seq &= EVICTION_MASK >> LRU_REFS_WIDTH;

	return abs_diff(max_seq, *token >> LRU_REFS_WIDTH) < MAX_NR_GENS;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;
	tier = lru_tier_from_refs(refs, workingset);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);

	/* see folio_add_lru() where folio_set_active() will be called */
	if (lru_gen_in_fault())
		mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	if (workingset) {
		folio_set_workingset(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	} else
		set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

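/*
 * Illustrative sketch (not kernel code): the ancestor walk above with
 * a hypothetical node type. Aging a leaf lruvec bumps the counter in
 * every ancestor up to the root, so refault distances remain
 * comparable at each level of the hierarchy.
 */
struct ex_lruvec {
	unsigned long nonresident_age;
	struct ex_lruvec *parent;	/* NULL at the root */
};

static void ex_age_nonresident(struct ex_lruvec *lruvec, unsigned long nr)
{
	do {
		lruvec->nonresident_age += nr;
	} while ((lruvec = lruvec->parent));
}
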
/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	if (lru_gen_enabled()) {
		bool recent;

		rcu_read_lock();
		recent = lru_gen_test_recent(shadow, &eviction_lruvec, &eviction, workingset);
		rcu_read_unlock();
		return recent;
	}

	rcu_read_lock();
	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_tryget(eviction_memcg))
		eviction_memcg = NULL;
	rcu_read_unlock();

	if (!mem_cgroup_disabled() && !eviction_memcg)
		return false;
	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in an RCU
	 * read section (e.g., in cachestat) - these callers need to skip
	 * flushing stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}

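/*
 * Illustrative sketch (standalone, not kernel code): why the unsigned
 * subtraction above stays correct when nonresident_age wraps. With a
 * hypothetical 8-bit counter, an eviction snapshot near the top of the
 * range still yields the right distance after the counter overflows.
 */
static unsigned char ex_distance(unsigned char refault, unsigned char eviction)
{
	return refault - eviction;	/* modulo-256 arithmetic */
}

/*
 * ex_distance(4, 250) == 10: the counter advanced 250 -> 255 -> 0 -> 4,
 * ten steps in total, even though 4 < 250 as plain integers.
 */
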
/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 */
	if (mem_cgroup_disabled() || folio_memcg_charged(folio))
		workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	lockdep_assert_held(&node->array->xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

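/*
 * Illustrative sketch (not kernel code): the shadow-node limit above.
 * With XA_CHUNK_SHIFT == 6 (64 slots per node) and a worst-case
 * density of 1/8th, one node is allowed per 8 pages of memory.
 */
static unsigned long ex_max_nodes(unsigned long pages)
{
	const unsigned int ex_xa_chunk_shift = 6;	/* 64 slots per node */

	return pages >> (ex_xa_chunk_shift - 3);	/* pages / 8 */
}

/* ex_max_nodes(1UL << 20) == 131072 nodes for a 4GB node with 4K pages */
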
static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  void *arg) __must_hold(lru->lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * &lru->lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the &lru->lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(&lru->lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(&lru->lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(&lru->lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = list_lru_init_memcg_key(&shadow_nodes, workingset_shadow_shrinker,
				      &shadow_nodes_key);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);
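
/*
 * Illustrative sketch (standalone userspace C, not kernel code): the
 * bucket_order calculation in workingset_init() above, with fls_long()
 * open-coded. The 36 timestamp bits and page counts are hypothetical.
 */
static unsigned int ex_fls(unsigned long x)
{
	unsigned int bits = 0;

	while (x) {		/* index of the highest set bit, 0 if x == 0 */
		x >>= 1;
		bits++;
	}
	return bits;
}

static unsigned int ex_bucket_order(unsigned int timestamp_bits,
				    unsigned long totalram_pages)
{
	unsigned int max_order = ex_fls(totalram_pages - 1);

	return max_order > timestamp_bits ? max_order - timestamp_bits : 0;
}

/*
 * ex_bucket_order(36, 1UL << 22) == 0: 4M pages fit in 36 bits.
 * ex_bucket_order(20, 1UL << 22) == 2: evictions are grouped into
 * buckets of 4 so the coarser timestamps still span all of memory.
 */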