// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *	NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *	NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *	(R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
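 *
 * For example (illustrative numbers): with NR_inactive = 1,000 and
 * NR_active = 3,000 page slots, a page refaulting with R - E = 2,500
 * is a candidate for activation, because 2,500 additional inactive
 * slots would have kept it resident; a page refaulting with
 * R - E = 5,000 is not, since even displacing every active page could
 * not have prevented its eviction.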
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT	1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
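
/*
 * Worked example (illustrative; the exact widths depend on the kernel
 * configuration): with MEM_CGROUP_ID_SHIFT = 16 memcg ID bits, a
 * hypothetical NODES_SHIFT of 6, the single workingset bit and the one
 * bit the xarray reserves for tagging value entries, EVICTION_SHIFT is
 * 1 + 1 + 6 + 16 = 24, which leaves 40 bits of eviction timestamp on a
 * 64-bit machine and only 8 on a 32-bit one.
 */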

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
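	/*
	 * E.g. (illustrative): aging the lruvec of a leaf cgroup C whose
	 * ancestors are B, A and the root advances the nonresident_age
	 * of C, B, A and the root lruvec via the parent walk below.
	 */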
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @folio: the folio being evicted
 * @target_memcg: the cgroup that is causing the reclaim
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;
	long nr;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
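	/*
	 * E.g. (illustrative, narrow values for brevity): an eviction
	 * snapshot of 0xfffffff0 and a refault reading of 0x10 give
	 * (0x10 - 0xfffffff0) == 0x20 after the unsigned wraparound,
	 * i.e. the true distance of 32. The masking with EVICTION_MASK
	 * keeps the comparison within the bits the shadow entry stored.
	 */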
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	mem_cgroup_flush_stats_delayed();
	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}
	if (refault_distance > workingset_size)
		goto out;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		lru_note_cost_folio(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = folio_memcg_rcu(folio);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
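	/*
	 * Worked arithmetic (illustrative, assuming 4K pages, the
	 * default XA_CHUNK_SHIFT of 6, i.e. 64 slots per node, and
	 * 7 xa_nodes per page as noted above): max_nodes below is
	 * pages >> 3, and that many nodes occupy about
	 * (pages / 8) * (PAGE_SIZE / 7) bytes, i.e. roughly 1/56th
	 * (~1.8%) of the memory accounted in pages.
	 */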
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	if (!spin_trylock(&mapping->host->i_lock)) {
		xa_unlock(&mapping->i_pages);
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
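	/*
	 * E.g. (illustrative, assuming a 32-bit configuration where
	 * EVICTION_SHIFT works out to 24): timestamp_bits is 8, and a
	 * machine booted with 1M pages (4GB with 4K pages) has
	 * max_order = 20, so bucket_order becomes 12 and evictions are
	 * grouped into buckets of 4096. On 64-bit, timestamp_bits is
	 * normally large enough that bucket_order stays 0.
	 */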
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);