1 From 4aab2be0983031a05cb4a19696c9da5749523426 Mon Sep 17 00:00:00 2001
2 From: Vijay Balakrishna <vijayb@linux.microsoft.com>
3 Date: Sat, 10 Oct 2020 23:16:40 -0700
4 Subject: mm: khugepaged: recalculate min_free_kbytes after memory hotplug as expected by khugepaged
5
6 From: Vijay Balakrishna <vijayb@linux.microsoft.com>
7
8 commit 4aab2be0983031a05cb4a19696c9da5749523426 upstream.
9
10 When memory is hotplug added or removed the min_free_kbytes should be
11 recalculated based on what is expected by khugepaged. Currently after
12 hotplug, min_free_kbytes will be set to a lower default, and the higher
13 default that is set when THP is enabled is lost.
14
15 This change restores min_free_kbytes as expected for THP consumers.
16
17 [vijayb@linux.microsoft.com: v5]
18 Link: https://lkml.kernel.org/r/1601398153-5517-1-git-send-email-vijayb@linux.microsoft.com
19
20 Fixes: f000565adb77 ("thp: set recommended min free kbytes")
21 Signed-off-by: Vijay Balakrishna <vijayb@linux.microsoft.com>
22 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
23 Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
24 Acked-by: Michal Hocko <mhocko@suse.com>
25 Cc: Allen Pais <apais@microsoft.com>
26 Cc: Andrea Arcangeli <aarcange@redhat.com>
27 Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
28 Cc: Oleg Nesterov <oleg@redhat.com>
29 Cc: Song Liu <songliubraving@fb.com>
30 Cc: <stable@vger.kernel.org>
31 Link: https://lkml.kernel.org/r/1600305709-2319-2-git-send-email-vijayb@linux.microsoft.com
32 Link: https://lkml.kernel.org/r/1600204258-13683-1-git-send-email-vijayb@linux.microsoft.com
33 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
34 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
35
36 ---
37 include/linux/khugepaged.h | 5 +++++
38 mm/khugepaged.c | 13 +++++++++++--
39 mm/page_alloc.c | 3 +++
40 3 files changed, 19 insertions(+), 2 deletions(-)
41
42 --- a/include/linux/khugepaged.h
43 +++ b/include/linux/khugepaged.h
44 @@ -13,6 +13,7 @@ extern int __khugepaged_enter(struct mm_
45 extern void __khugepaged_exit(struct mm_struct *mm);
46 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
47 unsigned long vm_flags);
48 +extern void khugepaged_min_free_kbytes_update(void);
49
50 #define khugepaged_enabled() \
51 (transparent_hugepage_flags & \
52 @@ -70,6 +71,10 @@ static inline int khugepaged_enter_vma_m
53 {
54 return 0;
55 }
56 +
57 +static inline void khugepaged_min_free_kbytes_update(void)
58 +{
59 +}
60 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
61
62 #endif /* _LINUX_KHUGEPAGED_H */
63 --- a/mm/khugepaged.c
64 +++ b/mm/khugepaged.c
65 @@ -50,6 +50,9 @@ enum scan_result {
66 #define CREATE_TRACE_POINTS
67 #include <trace/events/huge_memory.h>
68
69 +static struct task_struct *khugepaged_thread __read_mostly;
70 +static DEFINE_MUTEX(khugepaged_mutex);
71 +
72 /* default scan 8*512 pte (or vmas) every 30 second */
73 static unsigned int khugepaged_pages_to_scan __read_mostly;
74 static unsigned int khugepaged_pages_collapsed;
75 @@ -1948,8 +1951,6 @@ static void set_recommended_min_free_kby
76
77 int start_stop_khugepaged(void)
78 {
79 - static struct task_struct *khugepaged_thread __read_mostly;
80 - static DEFINE_MUTEX(khugepaged_mutex);
81 int err = 0;
82
83 mutex_lock(&khugepaged_mutex);
84 @@ -1976,3 +1977,11 @@ fail:
85 mutex_unlock(&khugepaged_mutex);
86 return err;
87 }
88 +
89 +void khugepaged_min_free_kbytes_update(void)
90 +{
91 + mutex_lock(&khugepaged_mutex);
92 + if (khugepaged_enabled() && khugepaged_thread)
93 + set_recommended_min_free_kbytes();
94 + mutex_unlock(&khugepaged_mutex);
95 +}
96 --- a/mm/page_alloc.c
97 +++ b/mm/page_alloc.c
98 @@ -64,6 +64,7 @@
99 #include <linux/page_owner.h>
100 #include <linux/kthread.h>
101 #include <linux/memcontrol.h>
102 +#include <linux/khugepaged.h>
103
104 #include <asm/sections.h>
105 #include <asm/tlbflush.h>
106 @@ -6785,6 +6786,8 @@ int __meminit init_per_zone_wmark_min(vo
107 setup_min_slab_ratio();
108 #endif
109
110 + khugepaged_min_free_kbytes_update();
111 +
112 return 0;
113 }
114 postcore_initcall(init_per_zone_wmark_min)