git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob
54d445b3c615a0fb49d05d5ec9551dcdbe022d1e
[thirdparty/kernel/stable-queue.git] /
1 From ff9e6d0148913f65e252eacb30cee895cc9f18e0 Mon Sep 17 00:00:00 2001
2 From: Sasha Levin <sashal@kernel.org>
3 Date: Tue, 30 Mar 2021 18:19:08 +0000
4 Subject: mem_cgroup: make sure moving_account, move_lock_task and stat_cpu in
5 the same cacheline
6
7 From: Aaron Lu <aaron.lu@intel.com>
8
9 commit e81bf9793b1861d74953ef041b4f6c7faecc2dbd upstream.
10
11 The LKP robot found a 27% will-it-scale/page_fault3 performance
12 regression regarding commit e27be240df53 ("mm: memcg: make sure
13 memory.events is uptodate when waking pollers").
14
15 What the test does is:
16 1 mkstemp() a 128M file on a tmpfs;
17 2 start $nr_cpu processes, each to loop the following:
18 2.1 mmap() this file in shared write mode;
19 2.2 write 0 to this file in a PAGE_SIZE step till the end of the file;
20 2.3 unmap() this file and repeat this process.
21 3 After 5 minutes, check how many loops they managed to complete, the
22 higher the better.
23
24 The commit itself looks innocent enough as it merely changed some event
25 counting mechanism and this test didn't trigger those events at all.
26 Perf shows increased cycles spent on accessing root_mem_cgroup->stat_cpu
27 in count_memcg_event_mm()(called by handle_mm_fault()) and in
28 __mod_memcg_state() called by page_add_file_rmap(). So it's likely due
29 to the changed layout of 'struct mem_cgroup' that either made stat_cpu
30 fall into a constantly modified cacheline or made some hot fields stop
31 being in the same cacheline.
32
33 I verified this by moving memory_events[] back to where it was:
34
35 : --- a/include/linux/memcontrol.h
36 : +++ b/include/linux/memcontrol.h
37 : @@ -205,7 +205,6 @@ struct mem_cgroup {
38 : int oom_kill_disable;
39 :
40 : /* memory.events */
41 : - atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
42 : struct cgroup_file events_file;
43 :
44 : /* protect arrays of thresholds */
45 : @@ -238,6 +237,7 @@ struct mem_cgroup {
46 : struct mem_cgroup_stat_cpu __percpu *stat_cpu;
47 : atomic_long_t stat[MEMCG_NR_STAT];
48 : atomic_long_t events[NR_VM_EVENT_ITEMS];
49 : + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
50 :
51 : unsigned long socket_pressure;
52
53 And performance restored.
54
55 Later investigation found that as long as the following 3 fields
56 moving_account, move_lock_task and stat_cpu are in the same cacheline,
57 performance will be good. To avoid future performance surprise by other
58 commits changing the layout of 'struct mem_cgroup', this patch makes
59 sure the 3 fields stay in the same cacheline.
60
61 One concern of this approach is, moving_account and move_lock_task could
62 be modified when a process changes memory cgroup while stat_cpu is an
63 always-read field, so it might hurt to place them in the same cacheline. I
64 assume it is rare for a process to change memory cgroup so this should
65 be OK.
66
67 Link: https://lkml.kernel.org/r/20180528114019.GF9904@yexl-desktop
68 Link: http://lkml.kernel.org/r/20180601071115.GA27302@intel.com
69 Signed-off-by: Aaron Lu <aaron.lu@intel.com>
70 Reported-by: kernel test robot <xiaolong.ye@intel.com>
71 Cc: Johannes Weiner <hannes@cmpxchg.org>
72 Cc: Michal Hocko <mhocko@kernel.org>
73 Cc: Tejun Heo <tj@kernel.org>
74 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
75 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
76 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
77 Signed-off-by: Sasha Levin <sashal@kernel.org>
78 ---
79 include/linux/memcontrol.h | 23 +++++++++++++++++++----
80 1 file changed, 19 insertions(+), 4 deletions(-)
81
82 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
83 index 6503a9ca27c1..c7876eadd206 100644
84 --- a/include/linux/memcontrol.h
85 +++ b/include/linux/memcontrol.h
86 @@ -155,6 +155,15 @@ enum memcg_kmem_state {
87 KMEM_ONLINE,
88 };
89
90 +#if defined(CONFIG_SMP)
91 +struct memcg_padding {
92 + char x[0];
93 +} ____cacheline_internodealigned_in_smp;
94 +#define MEMCG_PADDING(name) struct memcg_padding name;
95 +#else
96 +#define MEMCG_PADDING(name)
97 +#endif
98 +
99 /*
100 * The memory controller data structure. The memory controller controls both
101 * page cache and RSS per cgroup. We would eventually like to provide
102 @@ -202,7 +211,6 @@ struct mem_cgroup {
103 int oom_kill_disable;
104
105 /* memory.events */
106 - atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
107 struct cgroup_file events_file;
108
109 /* protect arrays of thresholds */
110 @@ -222,19 +230,26 @@ struct mem_cgroup {
111 * mem_cgroup ? And what type of charges should we move ?
112 */
113 unsigned long move_charge_at_immigrate;
114 + /* taken only while moving_account > 0 */
115 + spinlock_t move_lock;
116 + unsigned long move_lock_flags;
117 +
118 + MEMCG_PADDING(_pad1_);
119 +
120 /*
121 * set > 0 if pages under this cgroup are moving to other cgroup.
122 */
123 atomic_t moving_account;
124 - /* taken only while moving_account > 0 */
125 - spinlock_t move_lock;
126 struct task_struct *move_lock_task;
127 - unsigned long move_lock_flags;
128
129 /* memory.stat */
130 struct mem_cgroup_stat_cpu __percpu *stat_cpu;
131 +
132 + MEMCG_PADDING(_pad2_);
133 +
134 atomic_long_t stat[MEMCG_NR_STAT];
135 atomic_long_t events[NR_VM_EVENT_ITEMS];
136 + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
137
138 unsigned long socket_pressure;
139
140 --
141 2.30.2
142