// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>
#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif
typedef u8 rmap_age_t;
/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents. Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time". The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree. (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
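/*
 * Illustrative sketch, not part of the kernel source: the per-page flow
 * that the notes above describe, with locking, NUMA handling and error
 * paths omitted. The helper names merge_with_stable_page() and
 * unstable_search_or_insert() are hypothetical stand-ins for the real
 * merging helpers defined further down in this file.
 */
#if 0	/* example only, never compiled */
static void scan_one_page_sketch(struct ksm_rmap_item *rmap_item,
				 struct page *page)
{
	u32 checksum;
	struct page *kpage;

	/* 1. Look for identical content among already-merged (stable) pages. */
	kpage = stable_tree_search(page);
	if (kpage) {
		merge_with_stable_page(rmap_item, page, kpage);
		return;
	}

	/*
	 * 2. Only pages whose hash did not change since the previous full
	 *    scan are allowed into the unstable tree.
	 */
	checksum = calc_checksum(page);
	if (checksum != rmap_item->oldchecksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	/*
	 * 3. Search the unstable tree; on a content match, merge the two
	 *    pages into a new write-protected KSM page and insert it into
	 *    the stable tree.
	 */
	unstable_search_or_insert(rmap_item, page);
}
#endif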
/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};
/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};
/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};
/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 * @age: number of scan iterations since creation
 * @remaining_skips: how many scans to skip
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	rmap_age_t age;
	rmap_age_t remaining_skips;
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
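/*
 * Illustrative sketch, not part of the kernel source: because the tracked
 * virtual address is page aligned, its low bits are free to carry the flags
 * above plus the low bits of the scan seqnr. The helpers below are
 * hypothetical; the real code open-codes these tests.
 */
#if 0	/* example only, never compiled */
static inline unsigned long rmap_item_address(struct ksm_rmap_item *item)
{
	return item->address & PAGE_MASK;	/* strip flag/seqnr bits */
}

static inline bool rmap_item_is_unstable(struct ksm_rmap_item *item)
{
	return item->address & UNSTABLE_FLAG;
}

static inline bool rmap_item_is_stable(struct ksm_rmap_item *item)
{
	return item->address & STABLE_FLAG;
}
#endif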
/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
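/*
 * Editorial note (assumption, not from the source): &migrate_nodes.prev is
 * the second pointer-sized slot inside migrate_nodes, so it is aligned,
 * distinct from &migrate_nodes itself and distinct from any real list or
 * rb node; that makes it usable as the sentinel head value that marks a
 * stable_node as a "dup". The BUILD_BUG_ON()s in
 * remove_node_from_stable_tree() below verify exactly these properties.
 */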
#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};
static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* Default number of pages to scan per batch */
#define DEFAULT_PAGES_TO_SCAN 100

/* The number of pages scanned */
static unsigned long ksm_pages_scanned;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* Skip pages that couldn't be de-duplicated previously */
/* Default to true at least temporarily, for testing */
static bool ksm_smart_scan = true;

/* The number of zero pages placed by KSM */
unsigned long ksm_zero_pages;

/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;

/* Don't scan more than max pages per batch. */
static unsigned long ksm_advisor_max_pages_to_scan = 30000;

/* Min CPU for scanning pages per scan */
#define KSM_ADVISOR_MIN_CPU 10

/* Max CPU for scanning pages per scan */
static unsigned int ksm_advisor_max_cpu = 70;

/* Target scan time in seconds to analyze all KSM candidate pages. */
static unsigned long ksm_advisor_target_scan_time = 200;
/* Exponentially weighted moving average. */
#define EWMA_WEIGHT 30

/**
 * struct advisor_ctx - metadata for KSM advisor
 * @start_scan: start time of the current scan
 * @scan_time: scan time of previous scan
 * @change: change in percent to pages_to_scan parameter
 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
 */
struct advisor_ctx {
	ktime_t start_scan;
	unsigned long scan_time;
	unsigned long change;
	unsigned long long cpu_time;
};
static struct advisor_ctx advisor_ctx;
/* Define the different advisors */
enum ksm_advisor_type {
	KSM_ADVISOR_NONE,
	KSM_ADVISOR_SCAN_TIME,
};
static enum ksm_advisor_type ksm_advisor;
#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */

/* At least scan this many pages per batch. */
static unsigned long ksm_advisor_min_pages_to_scan = 500;

static void set_advisor_defaults(void)
{
	if (ksm_advisor == KSM_ADVISOR_NONE) {
		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
		advisor_ctx = (const struct advisor_ctx){ 0 };
		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
	}
}
#endif /* CONFIG_SYSFS */
static inline void advisor_start_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		advisor_ctx.start_scan = ktime_get();
}
/*
 * Use previous scan time if available, otherwise use current scan time as an
 * approximation for the previous scan time.
 */
static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
					   unsigned long scan_time)
{
	return ctx->scan_time ? ctx->scan_time : scan_time;
}
/* Calculate exponential weighted moving average */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}
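/*
 * Worked example (editorial, not from the source): with EWMA_WEIGHT == 30,
 * ewma(100, 200) == (70 * 100 + 30 * 200) / 100 == 130, i.e. each new
 * sample pulls the average 30% of the way toward itself, smoothing out
 * one-off spikes in the measured change factor.
 */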
/*
 * The scan time advisor is based on the current scan rate and the target
 * scan rate.
 *
 *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
 *
 * To avoid perturbations it calculates a change factor of previous changes.
 * A new change factor is calculated for each iteration and it uses an
 * exponentially weighted moving average. The new pages_to_scan value is
 * multiplied with that change factor:
 *
 *	new_pages_to_scan *= change factor
 *
 * The new_pages_to_scan value is limited by the cpu min and max values. It
 * calculates the cpu percent for the last scan and calculates the new
 * estimated cpu percent cost for the next scan. That value is capped by the
 * cpu min and max setting.
 *
 * In addition the new pages_to_scan value is capped by the max and min
 * limits.
 */
static void scan_time_advisor(void)
{
	unsigned int cpu_percent;
	unsigned long cpu_time;
	unsigned long cpu_time_diff;
	unsigned long cpu_time_diff_ms;
	unsigned long pages;
	unsigned long per_page_cost;
	unsigned long factor;
	unsigned long change;
	unsigned long last_scan_time;
	unsigned long scan_time;

	/* Convert scan time to seconds */
	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
			    MSEC_PER_SEC);
	scan_time = scan_time ? scan_time : 1;

	/* Calculate CPU consumption of ksmd background thread */
	cpu_time = task_sched_runtime(current);
	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;

	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
	cpu_percent = cpu_percent ? cpu_percent : 1;
	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);

	/* Calculate scan time as percentage of target scan time */
	factor = ksm_advisor_target_scan_time * 100 / scan_time;
	factor = factor ? factor : 1;

	/*
	 * Calculate scan time as percentage of last scan time and use
	 * exponentially weighted average to smooth it
	 */
	change = scan_time * 100 / last_scan_time;
	change = change ? change : 1;
	change = ewma(advisor_ctx.change, change);

	/* Calculate new scan rate based on target scan rate. */
	pages = ksm_thread_pages_to_scan * 100 / factor;
	/* Update pages_to_scan by weighted change percentage. */
	pages = pages * change / 100;

	/* Cap new pages_to_scan value */
	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
	per_page_cost = per_page_cost ? per_page_cost : 1;

	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
	pages = min(pages, ksm_advisor_max_pages_to_scan);

	/* Update advisor context */
	advisor_ctx.change = change;
	advisor_ctx.scan_time = scan_time;
	advisor_ctx.cpu_time = cpu_time;

	ksm_thread_pages_to_scan = pages;
	trace_ksm_advisor(scan_time, pages, cpu_percent);
}
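/*
 * Worked example (editorial, not from the source): suppose pages_to_scan
 * is 1000, the target scan time is 200s and the last scan took 400s. Then
 * factor = 200 * 100 / 400 = 50, so the raw proposal is
 * pages = 1000 * 100 / 50 = 2000, i.e. scan twice as fast. If the smoothed
 * change factor is 110, that becomes 2000 * 110 / 100 = 2200 pages, which
 * is then clamped by the per-CPU cost bounds and by
 * ksm_advisor_max_pages_to_scan before taking effect.
 */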
static void advisor_stop_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		scan_time_advisor();
}
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif
#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);
#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
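/*
 * For reference (editorial note): KSM_KMEM_CACHE(ksm_rmap_item, 0) expands
 * to
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct ksm_rmap_item),
 *			  __alignof__(struct ksm_rmap_item), 0, NULL);
 *
 * i.e. one slab cache per KSM metadata type, named after the struct.
 */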
static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}
static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}
static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}
static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}
static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}
static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}
static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}
static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}
static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}
static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}
static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}
/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		page = vm_normal_page(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	/* return 1 if the page is a normal ksm page or KSM-placed zero page */
	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
	pte_unmap_unlock(pte, ptl);
	return ret;
}
static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static const struct mm_walk_ops break_ksm_lock_vma_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_WRLOCK,
};
/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'. We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
	vm_fault_t ret = 0;
	const struct mm_walk_ops *ops = lock_vma ?
				&break_ksm_lock_vma_ops : &break_ksm_ops;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course. The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED  | VM_MAYSHARE | VM_PFNMAP  |
			     VM_IO      | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP))
		return false;		/* just ignore the advice */

	if (vma_is_dax(vma))
		return false;

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}
static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr, false);
	mmap_read_unlock(mm);
}
static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}
/*
 * This helper is used for getting the right index into the array of tree
 * roots. When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes with roots in index
 * 0. Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
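/*
 * Editorial example: with merge_across_nodes == 1 every kpfn maps to index
 * 0, so all pages share a single stable and a single unstable tree. With
 * the knob unset on a NUMA build, a kpfn residing on node 2 selects
 * root_stable_tree[2], keeping merged pages on the node where they live.
 */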
static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						   struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE; /* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}
static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}
static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next) {
			ksm_pages_sharing--;
			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
		} else {
			ksm_pages_shared--;
		}

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes. This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	trace_ksm_remove_ksm_page(stable_node->kpfn);
	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}
enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive. So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node. This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page may still briefly look like ours while it is on its way to being
 * freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is in reuse_ksm_page() case; but if page is swapcache
	 * in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too. We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}
static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}
/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end, bool lock_vma)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr, lock_vma);
	}
	return err;
}
static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}
static inline void set_page_stable_node(struct page *page,
					struct ksm_stable_node *stable_node)
{
	VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap(). Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in an LRU cache waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}
*stable_node
,
1147 struct rb_root
*root
)
1149 struct ksm_stable_node
*dup
;
1150 struct hlist_node
*hlist_safe
;
1152 if (!is_stable_node_chain(stable_node
)) {
1153 VM_BUG_ON(is_stable_node_dup(stable_node
));
1154 if (remove_stable_node(stable_node
))
1160 hlist_for_each_entry_safe(dup
, hlist_safe
,
1161 &stable_node
->hlist
, hlist_dup
) {
1162 VM_BUG_ON(!is_stable_node_dup(dup
));
1163 if (remove_stable_node(dup
))
1166 BUG_ON(!hlist_empty(&stable_node
->hlist
));
1167 free_stable_node_chain(stable_node
, root
);
static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}
static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);

		/*
		 * Exit right away if mm is exiting to avoid lockdep issue in
		 * the maple tree
		 */
		if (ksm_test_exit(mm))
			goto mm_exiting;

		for_each_vma(vmi, vma) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end, false);
			if (err)
				goto error;
		}

mm_exiting:
		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */
static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_local_page(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_local(addr);
	return checksum;
}
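/*
 * Editorial note (assumption): zero_checksum, declared above, is presumably
 * initialized at startup by running calc_checksum() over an empty (zeroed)
 * page, so that the ksm_use_zero_pages path can cheaply recognize candidate
 * zero pages by comparing checksums before doing a full compare.
 */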
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;
	bool anon_exclusive;
	pte_t entry;

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	anon_exclusive = PageAnonExclusive(page);
	entry = ptep_get(pvmw.pte);
	if (pte_write(entry) || pte_dirty(entry) ||
	    anon_exclusive || mm_tlb_flush_pending(mm)) {
		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
		if (anon_exclusive &&
		    folio_try_share_anon_rmap_pte(page_folio(page), page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(entry);

		if (pte_write(entry))
			entry = pte_wrprotect(entry);

		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = entry;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}
/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct folio *kfolio = page_folio(kpage);
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	pmd_t *pmd;
	pmd_t pmde;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = pmdp_get_lockless(pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out_mn;
	if (!pte_same(ptep_get(ptep), orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}
	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
	VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
			kfolio);

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		folio_get(kfolio);
		folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		/*
		 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
		 * we can easily track all KSM-placed zero pages by checking if
		 * the dirty bit in zero page's PTE is set.
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
		ksm_zero_pages++;
		mm->ksm_zero_pages++;
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
		 * will get wrong values in /proc, and a BUG message in dmesg
		 * when tearing down the mm.
		 */
		dec_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
	/*
	 * No need to notify as we are replacing a read only page with another
	 * read only page with the same content.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);

	folio = page_folio(page);
	folio_remove_rmap_pte(folio, page, vma);
	if (!folio_mapped(folio))
		folio_free_swap(folio);
	folio_put(folio);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}
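/*
 * Editorial note (assumption): a KSM-placed zero page installed above can
 * later be recognized from its PTE alone - it is a special zero-pfn PTE
 * with the dirty bit set - which is presumably what the is_ksm_zero_pte()
 * test used in break_ksm_pmd_entry() checks for.
 */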
/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page(). We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		if (split_huge_page(page))
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected. If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected. But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}
/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_lock */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	mmap_read_unlock(mm);
	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
				rmap_item, mm, err);
	return err;
}
/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
					   struct page *page,
					   struct ksm_rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}
static __always_inline
bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
	/*
	 * Check that at least one mapping still exists, otherwise
	 * there's not much point to merge and share with this
	 * stable_node, as the underlying tree_page of the other
	 * sharer is going to be freed soon.
	 */
	return stable_node->rmap_hlist_len &&
		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
	return __is_page_sharing_candidate(stable_node, 0);
}
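/*
 * Worked example (editorial): with the default ksm_max_page_sharing of 256,
 * a dup with rmap_hlist_len == 255 still qualifies for one more mapping
 * (255 + 0 < 256), but __is_page_sharing_candidate(node, 1) already fails
 * (255 + 1 == 256), which is how callers reserve room for the merge that
 * is underway plus one future merge.
 */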
static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
				    struct ksm_stable_node **_stable_node,
				    struct rb_root *root,
				    bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
	struct hlist_node *hlist_safe;
	struct page *_tree_page, *tree_page = NULL;
	int nr = 0;
	int found_rmap_hlist_len;

	if (!prune_stale_stable_nodes ||
	    time_before(jiffies, stable_node->chain_prune_time +
			msecs_to_jiffies(
				ksm_stable_node_chains_prune_millisecs)))
		prune_stale_stable_nodes = false;
	else
		stable_node->chain_prune_time = jiffies;

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		cond_resched();
		/*
		 * We must walk all stable_node_dup to prune the stale
		 * stable nodes during lookup.
		 *
		 * get_ksm_page can drop the nodes from the
		 * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk). The "dup"
		 * stable_node parameter itself will be freed from
		 * under us if it returns NULL.
		 */
		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
		if (!_tree_page)
			continue;
		nr += 1;
		if (is_page_sharing_candidate(dup)) {
			if (!found ||
			    dup->rmap_hlist_len > found_rmap_hlist_len) {
				if (found)
					put_page(tree_page);
				found = dup;
				found_rmap_hlist_len = found->rmap_hlist_len;
				tree_page = _tree_page;

				/* skip put_page for found dup */
				if (!prune_stale_stable_nodes)
					break;
				continue;
			}
		}
		put_page(_tree_page);
	}

	if (found) {
		/*
		 * nr is counting all dups in the chain only if
		 * prune_stale_stable_nodes is true, otherwise we may
		 * break the loop at nr == 1 even if there are
		 * multiple entries.
		 */
		if (prune_stale_stable_nodes && nr == 1) {
			/*
			 * If there's not just one entry it would
			 * corrupt memory, better BUG_ON. In KSM
			 * context with no lock held it's not even
			 * fatal.
			 */
			BUG_ON(stable_node->hlist.first->next);

			/*
			 * There's just one entry and it is below the
			 * deduplication limit so drop the chain.
			 */
			rb_replace_node(&stable_node->node, &found->node,
					root);
			free_stable_node(stable_node);
			ksm_stable_node_chains--;
			ksm_stable_node_dups--;
			/*
			 * NOTE: the caller depends on the stable_node
			 * to be equal to stable_node_dup if the chain
			 * was collapsed.
			 */
			*_stable_node = found;
			/*
			 * Just for robustness, as stable_node is
			 * otherwise left as a stable pointer, the
			 * compiler shall optimize it away at build
			 * time.
			 */
			stable_node = NULL;
		} else if (stable_node->hlist.first != &found->hlist_dup &&
			   __is_page_sharing_candidate(found, 1)) {
			/*
			 * If the found stable_node dup can accept one
			 * more future merge (in addition to the one
			 * that is underway) and is not at the head of
			 * the chain, put it there so next search will
			 * be quicker in the !prune_stale_stable_nodes
			 * case.
			 *
			 * NOTE: it would be inaccurate to use nr > 1
			 * instead of checking the hlist.first pointer
			 * directly, because in the
			 * prune_stale_stable_nodes case "nr" isn't
			 * the position of the found dup in the chain,
			 * but the total number of dups in the chain.
			 */
			hlist_del(&found->hlist_dup);
			hlist_add_head(&found->hlist_dup,
				       &stable_node->hlist);
		}
	}

	*_stable_node_dup = found;
	return tree_page;
}
static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
					       struct rb_root *root)
{
	if (!is_stable_node_chain(stable_node))
		return stable_node;
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return NULL;
	}
	return hlist_entry(stable_node->hlist.first,
			   typeof(*stable_node), hlist_dup);
}
/*
 * Like for get_ksm_page, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup like if the chain
 * never existed). It's up to the caller to verify tree_page is not
 * NULL before dereferencing *_stable_node or *_stable_node_dup.
 *
 * *_stable_node_dup is really a second output parameter of this
 * function and will be overwritten in all cases, the caller doesn't
 * need to initialize it.
 */
static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
					struct ksm_stable_node **_stable_node,
					struct rb_root *root,
					bool prune_stale_stable_nodes)
{
	struct ksm_stable_node *stable_node = *_stable_node;
	if (!is_stable_node_chain(stable_node)) {
		if (is_page_sharing_candidate(stable_node)) {
			*_stable_node_dup = stable_node;
			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
		}
		/*
		 * _stable_node_dup set to NULL means the stable_node
		 * reached the ksm_max_page_sharing limit.
		 */
		*_stable_node_dup = NULL;
		return NULL;
	}
	return stable_node_dup(_stable_node_dup, _stable_node, root,
			       prune_stale_stable_nodes);
}
static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
						struct ksm_stable_node **s_n,
						struct rb_root *root)
{
	return __stable_node_chain(s_n_d, s_n, root, true);
}
static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
					  struct ksm_stable_node *s_n,
					  struct rb_root *root)
{
	struct ksm_stable_node *old_stable_node = s_n;
	struct page *tree_page;

	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
	/* not pruning dups so s_n cannot have changed */
	VM_BUG_ON(s_n != old_stable_node);
	return tree_page;
}
1809 * stable_tree_search - search for page inside the stable tree
1811 * This function checks if there is a page inside the stable tree
1812 * with identical content to the page that we are scanning right now.
1814 * This function returns the stable tree node of identical content if found,
1817 static struct page
*stable_tree_search(struct page
*page
)
1820 struct rb_root
*root
;
1821 struct rb_node
**new;
1822 struct rb_node
*parent
;
1823 struct ksm_stable_node
*stable_node
, *stable_node_dup
, *stable_node_any
;
1824 struct ksm_stable_node
*page_node
;
1826 page_node
= page_stable_node(page
);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct ksm_stable_node, node);
		stable_node_any = NULL;
		tree_page = chain_prune(&stable_node_dup, &stable_node, root);
		/*
		 * NOTE: stable_node may have been freed by
		 * chain_prune() if the returned stable_node_dup is
		 * not NULL. stable_node_dup may have been inserted in
		 * the rbtree instead as a regular stable_node (in
		 * order to collapse the stable_node chain if a single
		 * stable_node dup was found in it). In such case the
		 * stable_node is overwritten by the callee to point
		 * to the stable_node_dup that was collapsed in the
		 * stable rbtree and stable_node will be equal to
		 * stable_node_dup like if the chain never existed.
		 */
		if (!stable_node_dup) {
			/*
			 * Either all stable_node dups were full in
			 * this stable_node chain, or this chain was
			 * empty and should be rb_erased.
			 */
			stable_node_any = stable_node_dup_any(stable_node,
							      root);
			if (!stable_node_any) {
				/* rb_erase just run */
				goto again;
			}
			/*
			 * Take any of the stable_node dups page of
			 * this stable_node chain to let the tree walk
			 * continue. All KSM pages belonging to the
			 * stable_node dups in a stable_node chain
			 * have the same content and they're
			 * write protected at all times. Any will work
			 * fine to continue the walk.
			 */
			tree_page = get_ksm_page(stable_node_any,
						 GET_KSM_PAGE_NOLOCK);
		}
		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			if (page_node) {
				VM_BUG_ON(page_node->head != &migrate_nodes);
				/*
				 * Test if the migrated page should be merged
				 * into a stable node dup. If the mapcount is
				 * 1 we can migrate it with another KSM page
				 * without adding it to the chain.
				 */
				if (page_mapcount(page) > 1)
					goto chain_append;
			}

			if (!stable_node_dup) {
				/*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
				 * but we cannot merge the scanned
				 * page in any of the existing
				 * stable_node dups because they're
				 * all full, we need to wait the
				 * scanned page to find itself a match
				 * in the unstable tree to create a
				 * brand new KSM page to add later to
				 * the dups of this stable_node.
				 */
				return NULL;
			}

			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_page = get_ksm_page(stable_node_dup,
						 GET_KSM_PAGE_TRYLOCK);

			if (PTR_ERR(tree_page) == -EBUSY)
				return ERR_PTR(-EBUSY);

			if (unlikely(!tree_page))
				/*
				 * The tree may have been rebalanced,
				 * so re-evaluate parent and new.
				 */
				goto again;
			unlock_page(tree_page);

			if (get_kpfn_nid(stable_node_dup->kpfn) !=
			    NUMA(stable_node_dup->nid)) {
				put_page(tree_page);
				goto replace;
			}
			return tree_page;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
out:
	if (is_page_sharing_candidate(page_node)) {
		get_page(page);
		return page;
	} else
		return NULL;

replace:
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* there is no chain */
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			rb_replace_node(&stable_node_dup->node,
					&page_node->node, root);
			if (is_page_sharing_candidate(page_node))
				get_page(page);
			else
				page = NULL;
		} else {
			rb_erase(&stable_node_dup->node, root);
			page = NULL;
		}
	} else {
		VM_BUG_ON(!is_stable_node_chain(stable_node));
		__stable_node_dup_del(stable_node_dup);
		if (page_node) {
			VM_BUG_ON(page_node->head != &migrate_nodes);
			list_del(&page_node->list);
			DO_NUMA(page_node->nid = nid);
			stable_node_chain_add_dup(page_node, stable_node);
			if (is_page_sharing_candidate(page_node))
				get_page(page);
			else
				page = NULL;
		} else {
			page = NULL;
		}
	}
	stable_node_dup->head = &migrate_nodes;
	list_add(&stable_node_dup->list, stable_node_dup->head);
	return page;

chain_append:
	/* stable_node_dup could be null if it reached the limit */
	if (!stable_node_dup)
		stable_node_dup = stable_node_any;
	/*
	 * If stable_node was a chain and chain_prune collapsed it,
	 * stable_node has been updated to be the new regular
	 * stable_node. A collapse of the chain is indistinguishable
	 * from the case there was no chain in the stable
	 * rbtree. Otherwise stable_node is the chain and
	 * stable_node_dup is the dup to replace.
	 */
	if (stable_node_dup == stable_node) {
		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
		/* chain is missing so create it */
		stable_node = alloc_stable_node_chain(stable_node_dup, root);
		if (!stable_node)
			return NULL;
	}
	/*
	 * Add this stable_node dup that was
	 * migrated to the stable_node chain
	 * of the current nid for this page
	 * content.
	 */
	VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
	VM_BUG_ON(page_node->head != &migrate_nodes);
	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	stable_node_chain_add_dup(page_node, stable_node);
	goto out;
}
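/*
 * Illustrative sketch (not code from this file): the shape a stable
 * tree entry takes once dedup hits ksm_max_page_sharing. A "chain"
 * rbtree node carries no page itself; it strings together "dup"
 * nodes, each a distinct KSM page with the same write-protected
 * content:
 *
 *	rbtree: ... -> chain (rmap_hlist_len == STABLE_NODE_CHAIN)
 *	                 hlist: dup0 -> dup1 -> dup2
 *
 * Each dup's rmap_hlist_len stays within ksm_max_page_sharing, which
 * is what bounds the rmap walk latency per KSM page.
 */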
/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
{
	int nid;
	unsigned long kpfn;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
	bool need_chain = false;

	kpfn = page_to_pfn(kpage);
	nid = get_kpfn_nid(kpfn);
	root = root_stable_tree + nid;
again:
	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct ksm_stable_node, node);
		stable_node_any = NULL;
		tree_page = chain(&stable_node_dup, stable_node, root);
		if (!stable_node_dup) {
			/*
			 * Either all stable_node dups were full in
			 * this stable_node chain, or this chain was
			 * empty and should be rb_erased.
			 */
			stable_node_any = stable_node_dup_any(stable_node,
							      root);
			if (!stable_node_any) {
				/* rb_erase just run */
				goto again;
			}
			/*
			 * Take any of the stable_node dups page of
			 * this stable_node chain to let the tree walk
			 * continue. All KSM pages belonging to the
			 * stable_node dups in a stable_node chain
			 * have the same content and they're
			 * write protected at all times. Any will work
			 * fine to continue the walk.
			 */
			tree_page = get_ksm_page(stable_node_any,
						 GET_KSM_PAGE_NOLOCK);
		}
		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			need_chain = true;
			break;
		}
	}

	stable_node_dup = alloc_stable_node();
	if (!stable_node_dup)
		return NULL;

	INIT_HLIST_HEAD(&stable_node_dup->hlist);
	stable_node_dup->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node_dup);
	stable_node_dup->rmap_hlist_len = 0;
	DO_NUMA(stable_node_dup->nid = nid);
	if (!need_chain) {
		rb_link_node(&stable_node_dup->node, parent, new);
		rb_insert_color(&stable_node_dup->node, root);
	} else {
		if (!is_stable_node_chain(stable_node)) {
			struct ksm_stable_node *orig = stable_node;
			/* chain is missing so create it */
			stable_node = alloc_stable_node_chain(orig, root);
			if (!stable_node) {
				free_stable_node(stable_node_dup);
				return NULL;
			}
		}
		stable_node_chain_add_dup(stable_node_dup, stable_node);
	}

	return stable_node_dup;
}
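/*
 * Usage sketch (mirrors the caller in cmp_and_merge_page(), with
 * kpage locked): a NULL return means the node or chain allocation
 * failed and the freshly merged kpage is left outside the stable
 * tree, so the caller must break_cow() both rmap_items:
 *
 *	stable_node = stable_tree_insert(kpage);
 *	if (stable_node) {
 *		stable_tree_append(tree_rmap_item, stable_node, false);
 *		stable_tree_append(rmap_item, stable_node, false);
 *	}
 */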
/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_unstable_tree + nid;
	new = &root->rb_node;

	while (*new) {
		struct ksm_rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else if (!ksm_merge_across_nodes &&
			   page_to_nid(tree_page) != nid) {
			/*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put in the right unstable
			 * tree next time: only merge with it when across_nodes.
			 */
			put_page(tree_page);
			return NULL;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}
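/*
 * Worked example (illustrative): two anonymous pages A and B with
 * identical contents, neither yet in either tree. Scanning A inserts
 * its rmap_item into the unstable tree and returns NULL. Scanning B
 * walks down to A's node, memcmp_pages() matches, and A's rmap_item
 * is returned with *tree_pagep set, so the caller can
 * try_to_merge_two_pages() and promote the pair to the stable tree.
 */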
/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct ksm_rmap_item *rmap_item,
			       struct ksm_stable_node *stable_node,
			       bool max_page_sharing_bypass)
{
	/*
	 * rmap won't find this mapping if we don't insert the
	 * rmap_item in the right stable_node
	 * duplicate. page_migration could break later if rmap breaks,
	 * so we can as well crash here. We really need to check for
	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
	 * for other negative values as an underflow if detected here
	 * for the first time (and not when decreasing rmap_hlist_len)
	 * would be sign of memory corruption in the stable_node.
	 */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	stable_node->rmap_hlist_len++;
	if (!max_page_sharing_bypass)
		/* possibly non fatal but unexpected overflow, only warn */
		WARN_ON_ONCE(stable_node->rmap_hlist_len >
			     ksm_max_page_sharing);

	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;

	rmap_item->mm->ksm_merging_pages++;
}
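/*
 * Accounting sketch (illustrative): the first rmap_item appended to a
 * stable node lands on an empty hlist, so it bumps ksm_pages_shared
 * (one KSM page now exists); every later rmap_item has a ->next and
 * bumps ksm_pages_sharing instead. Three mappings of one KSM page
 * therefore show up as pages_shared == 1, pages_sharing == 2.
 */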
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	struct ksm_rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct ksm_stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;
	bool max_page_sharing_bypass = false;

	stable_node = page_stable_node(page);
	if (stable_node) {
		if (stable_node->head != &migrate_nodes &&
		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
		    NUMA(stable_node->nid)) {
			stable_node_dup_del(stable_node);
			stable_node->head = &migrate_nodes;
			list_add(&stable_node->list, stable_node->head);
		}
		if (stable_node->head != &migrate_nodes &&
		    rmap_item->head == stable_node)
			return;
		/*
		 * If it's a KSM fork, allow it to go over the sharing limit
		 * without warnings.
		 */
		if (!is_page_sharing_candidate(stable_node))
			max_page_sharing_bypass = true;
	}

	/* We first start with searching the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage == page && rmap_item->head == stable_node) {
		put_page(kpage);
		return;
	}

	remove_rmap_item_from_tree(rmap_item);

	if (kpage) {
		if (PTR_ERR(kpage) == -EBUSY)
			return;

		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage),
					   max_page_sharing_bypass);
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	/*
	 * Same checksum as an empty page. We attempt to merge it with the
	 * appropriate zero page if the user enabled this via sysfs.
	 */
	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_mergeable_vma(mm, rmap_item->address);
		if (vma) {
			err = try_to_merge_one_page(vma, page,
					ZERO_PAGE(rmap_item->address));
			trace_ksm_merge_one_page(
				page_to_pfn(ZERO_PAGE(rmap_item->address)),
				rmap_item, mm, err);
		} else {
			/*
			 * If the vma is out of date, we do not need to
			 * continue.
			 */
			err = 0;
		}
		mmap_read_unlock(mm);
		/*
		 * In case of failure, the page was not really empty, so we
		 * need to continue. Otherwise we're done.
		 */
		if (!err)
			return;
	}
	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		bool split;

		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		/*
		 * If both pages we tried to merge belong to the same compound
		 * page, then we actually ended up increasing the reference
		 * count of the same compound page twice, and split_huge_page
		 * failed.
		 * Here we set a flag if that happened, and we use it later to
		 * try split_huge_page again. Since we call put_page right
		 * afterwards, the reference count will be correct and
		 * split_huge_page should succeed.
		 */
		split = PageTransCompound(page)
			&& compound_head(page) == compound_head(tree_page);
		put_page(tree_page);
		if (kpage) {
			/*
			 * The pages were successfully merged: insert new
			 * node in the stable tree and add both rmap_items.
			 */
			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node,
						   false);
				stable_tree_append(rmap_item, stable_node,
						   false);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		} else if (split) {
			/*
			 * We are here if we tried to merge two pages and
			 * failed because they both belonged to the same
			 * compound page. We will split the page now, but no
			 * merging will take place.
			 * We do not want to add the cost of a full lock; if
			 * the page is locked, it is better to skip it and
			 * perhaps try again later.
			 */
			if (!trylock_page(page))
				return;
			split_huge_page(page);
			unlock_page(page);
		}
	}
}
static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
					    struct ksm_rmap_item **rmap_list,
					    unsigned long addr)
{
	struct ksm_rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->slot.mm;
		rmap_item->mm->ksm_rmap_items++;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}
/*
 * Calculate skip age for the ksm page age. The age determines how often
 * de-duplicating has already been tried unsuccessfully. If the age is
 * smaller, the scanning of this page is skipped for less scans.
 *
 * @age: rmap_item age of page
 */
static unsigned int skip_age(rmap_age_t age)
{
	if (age <= 3)
		return 1;
	if (age <= 5)
		return 2;
	if (age <= 8)
		return 4;

	return 8;
}
/*
 * Determines if a page should be skipped for the current scan.
 *
 * @page: page to check
 * @rmap_item: associated rmap_item of page
 */
static bool should_skip_rmap_item(struct page *page,
				  struct ksm_rmap_item *rmap_item)
{
	rmap_age_t age;

	if (!ksm_smart_scan)
		return false;

	/*
	 * Never skip pages that are already KSM; pages cmp_and_merge_page()
	 * will essentially ignore them, but we still have to process them
	 * properly.
	 */
	if (PageKsm(page))
		return false;

	age = rmap_item->age;
	if (age != U8_MAX)
		rmap_item->age++;

	/*
	 * Smaller ages are not skipped, they need to get a chance to go
	 * through the different phases of the KSM merging.
	 */
	if (age < 3)
		return false;

	/*
	 * Are we still allowed to skip? If not, then don't skip it
	 * and determine how much more often we are allowed to skip next.
	 */
	if (!rmap_item->remaining_skips) {
		rmap_item->remaining_skips = skip_age(age);
		return false;
	}

	/* Skip this page */
	ksm_pages_skipped++;
	rmap_item->remaining_skips--;
	remove_rmap_item_from_tree(rmap_item);
	return true;
}
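/*
 * Worked example of smart scan pacing (illustrative; the exact
 * thresholds are whatever skip_age() encodes above): a page that
 * keeps failing to merge ages by one per full scan. Once old enough,
 * remaining_skips is recharged from skip_age(age), so a long-lived
 * never-merging page is really scanned only about once per
 * skip_age(age) + 1 passes, with the skipped passes counted in
 * /sys/kernel/mm/ksm/pages_skipped.
 */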
static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct ksm_rmap_item *rmap_item;
	struct vma_iterator vmi;
	int nid;

	if (list_empty(&ksm_mm_head.slot.mm_node))
		return NULL;

	mm_slot = ksm_scan.mm_slot;
	if (mm_slot == &ksm_mm_head) {
		advisor_start_scan();
		trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);

		/*
		 * A number of pages can hang around indefinitely in per-cpu
		 * LRU cache, raised page count preventing write_protect_page
		 * from merging them. Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		/*
		 * Whereas stale stable_nodes on the stable_tree itself
		 * get pruned in the regular course of stable_tree_search(),
		 * those moved out to the migrate_nodes list can accumulate:
		 * so prune them once before each full scan.
		 */
		if (!ksm_merge_across_nodes) {
			struct ksm_stable_node *stable_node, *next;
			struct page *page;

			list_for_each_entry_safe(stable_node, next,
						 &migrate_nodes, list) {
				page = get_ksm_page(stable_node,
						    GET_KSM_PAGE_NOLOCK);
				if (page)
					put_page(page);
				cond_resched();
			}
		}

		for (nid = 0; nid < ksm_nr_node_ids; nid++)
			root_unstable_tree[nid] = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		ksm_scan.mm_slot = mm_slot;
		spin_unlock(&ksm_mmlist_lock);
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
		 */
		if (mm_slot == &ksm_mm_head)
			return NULL;
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &mm_slot->rmap_list;
	}

	slot = &mm_slot->slot;
	mm = slot->mm;
	vma_iter_init(&vmi, mm, ksm_scan.address);

	mmap_read_lock(mm);
	if (ksm_test_exit(mm))
		goto no_vmas;

	for_each_vma(vmi, vma) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (IS_ERR_OR_NULL(*page)) {
				ksm_scan.address += PAGE_SIZE;
				cond_resched();
				continue;
			}
			if (is_zone_device_page(*page))
				goto next_page;
			if (PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(mm_slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;

					if (should_skip_rmap_item(*page, rmap_item))
						goto next_page;

					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				mmap_read_unlock(mm);
				return rmap_item;
			}
next_page:
			put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
no_vmas:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &mm_slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(mm_slot->slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_lock
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_lock then protects against race with MADV_MERGEABLE).
		 */
		hash_del(&mm_slot->slot.hash);
		list_del(&mm_slot->slot.mm_node);
		spin_unlock(&ksm_mmlist_lock);

		mm_slot_free(mm_slot_cache, mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
		mmap_read_unlock(mm);
		mmdrop(mm);
	} else {
		mmap_read_unlock(mm);
		/*
		 * mmap_read_unlock(mm) first because after
		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
		 * already have been freed under us by __ksm_exit()
		 * because the "mm_slot" is still hashed and
		 * ksm_scan.mm_slot doesn't point to it anymore.
		 */
		spin_unlock(&ksm_mmlist_lock);
	}

	/* Repeat until we've completed scanning the whole list */
	mm_slot = ksm_scan.mm_slot;
	if (mm_slot != &ksm_mm_head)
		goto next_mm;

	advisor_stop_scan();

	trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
	ksm_scan.seqnr++;
	return NULL;
}
/**
 * ksm_do_scan  - the ksm scanner main worker function.
 * @scan_npages:  number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct ksm_rmap_item *rmap_item;
	struct page *page;
	unsigned int npages = scan_npages;

	while (npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}

	ksm_pages_scanned += scan_npages - npages;
}
static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
}
static int ksm_scan_thread(void *nothing)
{
	unsigned int sleep_ms;

	set_freezable();
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		wait_while_offlining();
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		if (ksmd_should_run()) {
			sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
			wait_event_freezable_timeout(ksm_iter_wait,
				sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
				msecs_to_jiffies(sleep_ms));
		} else {
			wait_event_freezable(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}
static void __ksm_add_vma(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;

	if (vm_flags & VM_MERGEABLE)
		return;

	if (vma_ksm_compatible(vma))
		vm_flags_set(vma, VM_MERGEABLE);
}

static int __ksm_del_vma(struct vm_area_struct *vma)
{
	int err;

	if (!(vma->vm_flags & VM_MERGEABLE))
		return 0;

	if (vma->anon_vma) {
		err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
		if (err)
			return err;
	}

	vm_flags_clear(vma, VM_MERGEABLE);
	return 0;
}
/**
 * ksm_add_vma - Mark vma as mergeable if compatible
 *
 * @vma:  Pointer to vma
 */
void ksm_add_vma(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		__ksm_add_vma(vma);
}

static void ksm_add_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);
	for_each_vma(vmi, vma)
		__ksm_add_vma(vma);
}

static int ksm_del_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err;

	VMA_ITERATOR(vmi, mm, 0);
	for_each_vma(vmi, vma) {
		err = __ksm_del_vma(vma);
		if (err)
			return err;
	}
	return 0;
}
/**
 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
 *                        compatible VMA's
 *
 * @mm:  Pointer to mm
 *
 * Returns 0 on success, otherwise error code
 */
int ksm_enable_merge_any(struct mm_struct *mm)
{
	int err;

	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return 0;

	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
		err = __ksm_enter(mm);
		if (err)
			return err;
	}

	set_bit(MMF_VM_MERGE_ANY, &mm->flags);
	ksm_add_vmas(mm);

	return 0;
}
/**
 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
 *			   previously enabled via ksm_enable_merge_any().
 *
 * Disabling merging implies unmerging any merged pages, like setting
 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
 * merging on all compatible VMA's remains enabled.
 *
 * @mm: Pointer to mm
 *
 * Returns 0 on success, otherwise error code
 */
int ksm_disable_merge_any(struct mm_struct *mm)
{
	int err;

	if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return 0;

	err = ksm_del_vmas(mm);
	if (err) {
		ksm_add_vmas(mm);
		return err;
	}

	clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
	return 0;
}
int ksm_disable(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);

	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
		return 0;
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return ksm_disable_merge_any(mm);
	return ksm_del_vmas(mm);
}
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		if (vma->vm_flags & VM_MERGEABLE)
			return 0;
		if (!vma_ksm_compatible(vma))
			return 0;

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end, true);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ksm_madvise);
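/*
 * Userspace entry point sketch (illustrative): madvise(2) routes
 * MADV_MERGEABLE / MADV_UNMERGEABLE here per VMA:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, len, MADV_MERGEABLE);    // candidate area for ksmd
 *	madvise(p, len, MADV_UNMERGEABLE);  // break COW on merged pages
 */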
int __ksm_enter(struct mm_struct *mm)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	int needs_wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return -ENOMEM;

	slot = &mm_slot->slot;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);

	spin_lock(&ksm_mmlist_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
	 * insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 *
	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
	 * scanning cursor, otherwise KSM pages in newly forked mms will be
	 * missed: then we might as well insert at the end of the list.
	 */
	if (ksm_run & KSM_RUN_UNMERGE)
		list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
	else
		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	mmgrab(mm);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	trace_ksm_enter(mm);
	return 0;
}
void __ksm_exit(struct mm_struct *mm)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_lock to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hash_del(&slot->hash);
			list_del(&slot->mm_node);
			easy_to_free = 1;
		} else {
			list_move(&slot->mm_node,
				  &ksm_scan.mm_slot->slot.mm_node);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		mm_slot_free(mm_slot_cache, mm_slot);
		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}

	trace_ksm_exit(mm);
}
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page = folio_page(folio, 0);
	struct anon_vma *anon_vma = folio_anon_vma(folio);
	struct folio *new_folio;

	if (folio_test_large(folio))
		return folio;

	if (folio_test_ksm(folio)) {
		if (folio_stable_node(folio) &&
		    !(ksm_run & KSM_RUN_UNMERGE))
			return folio;	/* no need to copy it */
	} else if (!anon_vma) {
		return folio;		/* no need to copy it */
	} else if (folio->index == linear_page_index(vma, addr) &&
			anon_vma->root == vma->anon_vma->root) {
		return folio;		/* still no need to copy it */
	}
	if (PageHWPoison(page))
		return ERR_PTR(-EHWPOISON);
	if (!folio_test_uptodate(folio))
		return folio;		/* let do_swap_page report the error */

	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
	if (new_folio &&
	    mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		new_folio = NULL;
	}
	if (new_folio) {
		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
								addr, vma)) {
			folio_put(new_folio);
			memory_failure_queue(folio_pfn(folio), 0);
			return ERR_PTR(-EHWPOISON);
		}
		folio_set_dirty(new_folio);
		__folio_mark_uptodate(new_folio);
		__folio_set_locked(new_folio);
#ifdef CONFIG_SWAP
		count_vm_event(KSM_SWPIN_COPY);
#endif
	}

	return new_folio;
}
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
	struct ksm_stable_node *stable_node;
	struct ksm_rmap_item *rmap_item;
	int search_new_forks = 0;

	VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	stable_node = folio_stable_node(folio);
	if (!stable_node)
		return;
again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		cond_resched();
		if (!anon_vma_trylock_read(anon_vma)) {
			if (rwc->try_lock) {
				rwc->contended = true;
				return;
			}
			anon_vma_lock_read(anon_vma);
		}
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			unsigned long addr;

			cond_resched();
			vma = vmac->vma;

			/* Ignore the stable/unstable/sqnr flags */
			addr = rmap_item->address & PAGE_MASK;

			if (addr < vma->vm_start || addr >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
			if (rwc->done && rwc->done(folio)) {
				anon_vma_unlock_read(anon_vma);
				return;
			}
		}
		anon_vma_unlock_read(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
}
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Collect processes when the error hit an ksm page.
 */
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early)
{
	struct ksm_stable_node *stable_node;
	struct ksm_rmap_item *rmap_item;
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	stable_node = folio_stable_node(folio);
	if (!stable_node)
		return;
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *av = rmap_item->anon_vma;

		anon_vma_lock_read(av);
		rcu_read_lock();
		for_each_process(tsk) {
			struct anon_vma_chain *vmac;
			unsigned long addr;
			struct task_struct *t =
				task_early_kill(tsk, force_early);
			if (!t)
				continue;
			anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
						       ULONG_MAX) {
				vma = vmac->vma;
				if (vma->vm_mm == t->mm) {
					addr = rmap_item->address & PAGE_MASK;
					add_to_kill_ksm(t, page, vma, to_kill,
							addr);
				}
			}
		}
		rcu_read_unlock();
		anon_vma_unlock_read(av);
	}
}
#endif /* CONFIG_MEMORY_FAILURE */
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
	struct ksm_stable_node *stable_node;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);

	stable_node = folio_stable_node(folio);
	if (stable_node) {
		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
		stable_node->kpfn = folio_pfn(newfolio);
		/*
		 * newfolio->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to get_ksm_page() before it can see that folio->mapping
		 * has gone stale (or that folio_test_swapcache has been cleared).
		 */
		smp_wmb();
		set_page_stable_node(&folio->page, NULL);
	}
}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_HOTREMOVE
static void wait_while_offlining(void)
{
	while (ksm_run & KSM_RUN_OFFLINE) {
		mutex_unlock(&ksm_thread_mutex);
		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
			    TASK_UNINTERRUPTIBLE);
		mutex_lock(&ksm_thread_mutex);
	}
}

static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
					 unsigned long start_pfn,
					 unsigned long end_pfn)
{
	if (stable_node->kpfn >= start_pfn &&
	    stable_node->kpfn < end_pfn) {
		/*
		 * Don't get_ksm_page, page has already gone:
		 * which is why we keep kpfn instead of page*
		 */
		remove_node_from_stable_tree(stable_node);
		return true;
	}
	return false;
}

static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
					   unsigned long start_pfn,
					   unsigned long end_pfn,
					   struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		return stable_node_dup_remove_range(stable_node, start_pfn,
						    end_pfn);
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
	}
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return true; /* notify caller that tree was rebalanced */
	} else
		return false;
}

static void ksm_check_stable_tree(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	struct ksm_stable_node *stable_node, *next;
	struct rb_node *node;
	int nid;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		node = rb_first(root_stable_tree + nid);
		while (node) {
			stable_node = rb_entry(node, struct ksm_stable_node, node);
			if (stable_node_chain_remove_range(stable_node,
							   start_pfn, end_pfn,
							   root_stable_tree +
							   nid))
				node = rb_first(root_stable_tree + nid);
			else
				node = rb_next(node);
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			remove_node_from_stable_tree(stable_node);
		cond_resched();
	}
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
		 * and remove_all_stable_nodes() while memory is going offline:
		 * it is unsafe for them to touch the stable tree at this time.
		 * But unmerge_ksm_pages(), rmap lookups and other entry points
		 * which do not need the ksm_thread_mutex are all safe.
		 */
		mutex_lock(&ksm_thread_mutex);
		ksm_run |= KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree,
		 * otherwise get_ksm_page() might later try to access a
		 * non-existent struct page.
		 */
		ksm_check_stable_tree(mn->start_pfn,
				      mn->start_pfn + mn->nr_pages);
		fallthrough;
	case MEM_CANCEL_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		ksm_run &= ~KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);

		smp_mb();	/* wake_up_bit advises this */
		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
		break;
	}
	return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
	return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
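/*
 * Worked example (illustrative numbers): a process with 1000
 * ksm_merging_pages, 200 ksm_zero_pages and 5000 rmap_items on a 4KiB
 * page size system, taking sizeof(struct ksm_rmap_item) as 64 bytes:
 *
 *	(1000 + 200) * 4096 - 5000 * 64 = 4915200 - 320000
 *				        = 4595200 bytes of profit
 *
 * This is the value exposed through /proc/<pid>/ksm_stat.
 */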
#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;
	wake_up_interruptible(&ksm_iter_wait);

	return count;
}
KSM_ATTR(sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int nr_pages;
	int err;

	if (ksm_advisor != KSM_ADVISOR_NONE)
		return -EINVAL;

	err = kstrtouint(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);
static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int flags;
	int err;

	err = kstrtouint(buf, 10, &flags);
	if (err)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
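/*
 * Sysfs usage sketch (illustrative shell session):
 *
 *	echo 1 > /sys/kernel/mm/ksm/run   # KSM_RUN_MERGE: start ksmd
 *	echo 0 > /sys/kernel/mm/ksm/run   # KSM_RUN_STOP: stop scanning
 *	echo 2 > /sys/kernel/mm/ksm/run   # KSM_RUN_UNMERGE: unmerge all
 *
 * Writing 2 may take a while and can fail, in which case the store
 * above reports the error back through "count".
 */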
#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long knob;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_merge_across_nodes != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else if (root_stable_tree == one_stable_tree) {
			struct rb_root *buf;
			/*
			 * This is the first time that we switch away from the
			 * default of merging across nodes: must now allocate
			 * a buffer to hold as many roots as may be needed.
			 * Allocate stable and unstable together:
			 * MAXSMP NODES_SHIFT 10 will use 16kB.
			 */
			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
				      GFP_KERNEL);
			/* Let us assume that RB_ROOT is NULL is zero */
			if (!buf)
				err = -ENOMEM;
			else {
				root_stable_tree = buf;
				root_unstable_tree = buf + nr_node_ids;
				/* Stable tree is empty but not the unstable */
				root_unstable_tree[0] = one_unstable_tree[0];
			}
		}
		if (!err) {
			ksm_merge_across_nodes = knob;
			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif
static ssize_t use_zero_pages_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
}

static ssize_t use_zero_pages_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_use_zero_pages = value;

	return count;
}
KSM_ATTR(use_zero_pages);
static ssize_t max_page_sharing_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
}

static ssize_t max_page_sharing_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	int knob;

	err = kstrtoint(buf, 10, &knob);
	if (err)
		return err;
	/*
	 * When a KSM page is created it is shared by 2 mappings. This
	 * being a signed comparison, it implicitly verifies it's not
	 * negative.
	 */
	if (knob < 2)
		return -EINVAL;

	if (READ_ONCE(ksm_max_page_sharing) == knob)
		return count;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_max_page_sharing != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else
			ksm_max_page_sharing = knob;
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(max_page_sharing);
static ssize_t pages_scanned_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
}
KSM_ATTR_RO(pages_scanned);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t pages_skipped_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
}
KSM_ATTR_RO(pages_skipped);
static ssize_t ksm_zero_pages_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
}
KSM_ATTR_RO(ksm_zero_pages);

static ssize_t general_profit_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long general_profit;

	general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
				ksm_rmap_items * sizeof(struct ksm_rmap_item);

	return sysfs_emit(buf, "%ld\n", general_profit);
}
KSM_ATTR_RO(general_profit);
static ssize_t stable_node_dups_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_stable_node_chains_prune_millisecs = msecs;

	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);
static ssize_t smart_scan_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_smart_scan);
}

static ssize_t smart_scan_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_smart_scan = value;
	return count;
}
KSM_ATTR(smart_scan);
static ssize_t advisor_mode_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (ksm_advisor == KSM_ADVISOR_NONE)
		output = "[none] scan-time";
	else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		output = "none [scan-time]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t advisor_mode_store(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf,
				  size_t count)
{
	enum ksm_advisor_type curr_advisor = ksm_advisor;

	if (sysfs_streq("scan-time", buf))
		ksm_advisor = KSM_ADVISOR_SCAN_TIME;
	else if (sysfs_streq("none", buf))
		ksm_advisor = KSM_ADVISOR_NONE;
	else
		return -EINVAL;

	/* Set advisor default values */
	if (curr_advisor != ksm_advisor)
		set_advisor_defaults();

	return count;
}
KSM_ATTR(advisor_mode);
static ssize_t advisor_max_cpu_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_cpu = value;
	return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_min_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;
	if (value < 1)
		return -EINVAL;

	ksm_advisor_target_scan_time = value;
	return count;
}
KSM_ATTR(advisor_target_scan_time);
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);