// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstances.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, but hopefully less
 * frequently than the thrashing set:
 *
 * +-memory available to cache-+
 * |                           |
 * +-inactive------+-active----+
 * a b | c d e f g h i | J K L M N |
 * +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap we should also consider NR_inactive_anon and
 * NR_active_anon, so for page cache and anonymous pages respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *   + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *   + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
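 *
 * For illustration (numbers invented for this example, not from the
 * kernel): on a swapless node with NR_inactive_file = 1,000 and
 * NR_active_file = 9,000, a file page refaulting with (R - E) = 6,000
 * passes 6,000 <= 9,000 and is activated - it could have stayed
 * resident had the active pages ceded their slots. At
 * (R - E) = 12,000, the page could not have survived even with the
 * whole cache to itself, so it stays on the inactive list.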
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
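/*
 * For example (illustrative only): with bucket_order == 2, an
 * eviction at nonresident_age 1027 is stored as 1027 >> 2 = 256 and
 * read back as 256 << 2 = 1024 - distances are then measured in
 * buckets of 2^bucket_order page accesses rather than single
 * accesses.
 */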
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}
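
/*
 * Shadow entry layout, most to least significant bits (a sketch
 * derived from pack_shadow()/unpack_shadow()):
 *
 *   +----------+---------------------+-------------+------------+
 *   | eviction | memcg id            | node id     | workingset |
 *   | (rest)   | MEM_CGROUP_ID_SHIFT | NODES_SHIFT | 1 bit      |
 *   +----------+---------------------+-------------+------------+
 *
 * xa_mk_value() additionally claims BITS_PER_LONG - BITS_PER_XA_VALUE
 * bits at the top, which is why they are part of EVICTION_SHIFT.
 */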

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}

#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	bool workingset = folio_test_workingset(folio);
	int tier = lru_tier_from_refs(refs, workingset);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
}
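
/*
 * Token layout under MGLRU (a sketch of the packing done in
 * lru_gen_eviction() above): the low LRU_REFS_WIDTH bits carry the
 * folio's reference count minus the implicit first access, and the
 * bits above carry min_seq, the oldest generation at eviction time.
 * A refault can thus recover both how long ago the folio was evicted
 * and how hot it had been while resident.
 */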

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long max_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);
	max_seq &= EVICTION_MASK >> LRU_REFS_WIDTH;

	return abs_diff(max_seq, *token >> LRU_REFS_WIDTH) < MAX_NR_GENS;
}
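
/*
 * Example with illustrative numbers: if MAX_NR_GENS == 4 and the
 * current max_seq is 10, tokens packed from generations 7..10 test
 * recent; a token from generation 6 or older was evicted too many
 * generations ago for its tier statistics to still apply.
 */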

static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;
	tier = lru_tier_from_refs(refs, workingset);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);

	/* see folio_add_lru() where folio_set_active() will be called */
	if (lru_gen_in_fault())
		mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	if (workingset) {
		folio_set_workingset(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	}

	set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}
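
/*
 * Illustration (hypothetical hierarchy): with cgroups root -> A -> A1,
 * evicting an 8-page folio charged to A1 advances nonresident_age by 8
 * in A1, in A and in root, so a refault distance computed at whichever
 * level drove the reclaim remains comparable to that level's LRU size.
 */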

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}
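
/*
 * Caller-side sketch (simplified from __remove_mapping() in
 * mm/vmscan.c): reclaim computes the shadow entry under the folio
 * lock and stores it in the folio's old page cache slot:
 *
 *	shadow = workingset_eviction(folio, target_memcg);
 *	__filemap_remove_folio(folio, shadow);
 */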

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
			    bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	if (lru_gen_enabled()) {
		bool recent;

		rcu_read_lock();
		recent = lru_gen_test_recent(shadow, &eviction_lruvec,
					     &eviction, workingset);
		rcu_read_unlock();
		return recent;
	}

	rcu_read_lock();
	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_tryget(eviction_memcg))
		eviction_memcg = NULL;
	rcu_read_unlock();

	if (!mem_cgroup_disabled() && !eviction_memcg)
		return false;

	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in an RCU
	 * read section (e.g., in cachestat) - these callers need to skip
	 * flushing stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
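
	/*
	 * Wraparound example (toy numbers, pretending EVICTION_MASK
	 * were 0xFF): eviction snapshot 250, counter since wrapped
	 * around to 20. (20 - 250) & 0xFF == 26, matching the true
	 * count of 6 accesses up to the wrap plus 20 after it.
	 */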

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 */
	if (mem_cgroup_disabled() || folio_memcg_charged(folio))
		workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	lockdep_assert_held(&node->array->xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}
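
/*
 * Hookup sketch (for orientation; helper names as found in mm
 * internals, not defined in this file): page cache operations arrange
 * for this callback to run on xarray node updates, e.g.
 * mapping_set_update() -> xas_set_update(&xas, workingset_update_node),
 * always under the i_pages lock as asserted above.
 */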

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
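	/*
	 * Worked numbers for the 64-bit case above: XA_CHUNK_SHIFT is 6,
	 * so max_nodes = pages >> 3 allows one xa_node per 8 cache pages.
	 * At 7 nodes per page of node memory, shadow node overhead is
	 * capped at 1/(8*7) = 1/56, i.e. the ~1.8% quoted above.
	 */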
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  void *arg) __must_hold(lru->lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * &lru->lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the &lru->lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(&lru->lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(&lru->lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(&lru->lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);
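
	/*
	 * Worked example (typical values, approximate): on 64-bit,
	 * EVICTION_SHIFT is roughly 18-28 depending on NODES_SHIFT and
	 * MEM_CGROUP_ID_SHIFT, leaving 36+ timestamp bits, so
	 * bucket_order normally remains 0. On 32-bit with 4GB of RAM
	 * (max_order = 20) and ~14 timestamp bits, bucket_order becomes
	 * 6, grouping evictions into buckets of 64 accesses.
	 */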

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = list_lru_init_memcg_key(&shadow_nodes, workingset_shadow_shrinker,
				      &shadow_nodes_key);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);