// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}
/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}
/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = damon_sz_region(r);
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
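/*
 * A worked example of the split above, with values chosen purely for
 * illustration: for a region of 10 * DAMON_MIN_REGION bytes and
 * nr_pieces == 3, sz_piece becomes 3 * DAMON_MIN_REGION.  'r' is shrunk to
 * the first piece, the loop adds two more pieces (9 * DAMON_MIN_REGION
 * covered in total), and the "complement" step stretches the last piece to
 * 'orig_end' so the one-DAMON_MIN_REGION rounding remainder is not lost.
 */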
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}
/*
 * Find three regions separated by two biggest unmapped regions
 *
 * mm		the mm_struct of the target address space
 * regions	an array of three address ranges that results will be saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to below comments of '__damon_va_init_regions()' function to know why
 * this is necessary.
 *
 * Returns 0 if success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range first_gap = {0}, second_gap = {0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	unsigned long start;

	/*
	 * Find the two biggest gaps so that first_gap > second_gap > others.
	 * If this is too slow, it can be optimised to examine the maple
	 * tree gaps.
	 */
	for_each_vma(vmi, vma) {
		unsigned long gap;

		if (!prev) {
			start = vma->vm_start;
			goto next;
		}
		gap = vma->vm_start - prev->vm_end;

		if (gap > sz_range(&first_gap)) {
			second_gap = first_gap;
			first_gap.start = prev->vm_end;
			first_gap.end = vma->vm_start;
		} else if (gap > sz_range(&second_gap)) {
			second_gap.start = prev->vm_end;
			second_gap.end = vma->vm_start;
		}
next:
		prev = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

	return 0;
}
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}
/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a number of small portions of the entire address space
 * is actually mapped to the memory and accessed, monitoring the unmapped
 * regions is wasteful.  That said, because we can deal with small noises,
 * tracking every mapping is not strictly required but could even incur a high
 * overhead if the mapping frequently changes or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as a region that
 * has no access.  Moreover, applying the real mappings that would have many
 * unmapped areas inside will make the adaptive mechanism quite complex.  That
 * said, too huge unmapped areas inside the monitoring target should be removed
 * to not take the time for the adaptive mechanism.
 *
 * For the reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As usual memory map of processes is as below, the gap between the heap and
 * the uppermost mmap()-ed region, and the gap between the lowermost mmap()-ed
 * region and the stack will be two biggest unmapped regions.  Because these
 * gaps are exceptionally huge areas in usual address space, excluding these
 * two biggest unmapped regions will be sufficient to make a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}
/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}
/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pmd_t pmde;
	spinlock_t *ptl;

	if (pmd_trans_huge(pmdp_get(pmd))) {
		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(pmde)) {
			damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	if (!pte_present(ptep_get(pte)))
		goto out;
	damon_ptep_mkold(pte, walk->vma, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct folio *folio = pfn_folio(pte_pfn(entry));

	folio_get(folio);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}
/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
					struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}
static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(mm, r);
		mmput(mm);
	}
}
struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pte_t ptent;
	spinlock_t *ptl;
	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdp_get(pmd))) {
		pmd_t pmde;

		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(pmde)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		folio = damon_get_folio(pmd_pfn(pmde));
		if (!folio)
			goto huge_out;
		if (pmd_young(pmde) || !folio_test_idle(folio) ||
					mmu_notifier_test_young(walk->mm,
						addr))
			priv->young = true;
		*priv->folio_sz = HPAGE_PMD_SIZE;
		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	ptent = ptep_get(pte);
	if (!pte_present(ptent))
		goto out;
	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		goto out;
	if (pte_young(ptent) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = folio_size(folio);
	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	folio = pfn_folio(pte_pfn(entry));
	folio_get(folio);

	if (pte_young(entry) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = huge_page_size(h);

	folio_put(folio);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}
/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		mmput(mm);
	}

	return max_nr_accesses;
}
/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(damon_sz_region(r));
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		/*
		 * DAMOS actions that are not yet supported by 'vaddr'.
		 */
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}
static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);
#include "vaddr-test.h"