/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

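/*
 * Illustrative expansion (editorial sketch, not in the upstream file):
 * on SMP builds, MEMCG_PADDING(_pad1_) becomes
 *
 *	struct memcg_padding _pad1_;
 *
 * and since struct memcg_padding is ____cacheline_internodealigned_in_smp,
 * the zero-sized member forces the fields that follow it onto a fresh
 * internode cacheline, limiting false sharing between hot counters.
 * On !SMP builds the macro expands to nothing.
 */
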
/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it kills one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the memcg
 * associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

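/*
 * Illustrative usage (editorial sketch of a hypothetical caller): any one
 * of the bindings listed above keeps the result stable, e.g. the folio
 * lock:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		...;	// inspect per-memcg state
 *	folio_unlock(folio);
 */
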
/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function, unlike page_memcg(), can take any page
 * as an argument. It has to be used in cases when it's not known whether
 * a page has an associated memory cgroup pointer, an objcgs vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect the memcg
 * associated with the kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

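/*
 * Illustrative usage (editorial sketch, loosely following the
 * shrink_node_memcgs() pattern in mm/vmscan.c): a reclaimer consults the
 * effective protection before scanning a memcg; "sc" stands in for that
 * file's scan_control.
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(memcg)) {
 *		continue;	// hard protection, never reclaim
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;	// soft protection, skip for now
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 */
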
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

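/*
 * Illustrative usage (editorial sketch of a hypothetical allocation path):
 * charge a freshly allocated folio and back out if the charge fails.
 *
 *	folio = filemap_alloc_folio(gfp, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// On the free side, mem_cgroup_uncharge() runs once the last
 *	// reference to the folio is dropped.
 */
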
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

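/*
 * Illustrative usage (editorial sketch): the lock/unlock pairing for the
 * helpers above; the matching unlock_page_lruvec*() helpers are defined
 * at the bottom of this header.
 *
 *	struct lruvec *lruvec;
 *	unsigned long flags;
 *
 *	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *	// ... manipulate the folio's LRU state ...
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */
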
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

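/*
 * Illustrative usage (editorial sketch): the canonical hierarchy walk
 * with mem_cgroup_iter(). Passing the previous position back continues
 * the walk; leaving early requires mem_cgroup_iter_break() to drop the
 * reference held on the current position.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (done) {	// "done" is a placeholder condition
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
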
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

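/*
 * Illustrative usage (editorial sketch; assumes the rcu_read_lock() rule
 * documented with mem_cgroup_from_id() in mm/memcontrol.c): an id obtained
 * here can outlive the cgroup (e.g. in swap records) and is resolved back
 * under RCU.
 *
 *	unsigned short id = mem_cgroup_id(memcg);
 *	...
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;	// cgroup went away meanwhile
 *	rcu_read_unlock();
 */
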
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_delayed(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

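/*
 * Illustrative note (editorial): as with mod_memcg_state() above, the
 * __-prefixed modifiers assume the caller already has interrupts
 * disabled (or is otherwise serialized), while the plain wrappers
 * bracket the update with local_irq_save()/local_irq_restore(). From
 * sleepable context one would write, e.g.:
 *
 *	mod_memcg_lruvec_state(lruvec, NR_SLAB_RECLAIMABLE_B, nr_bytes);
 */
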
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

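/*
 * Illustrative usage (editorial sketch of a hypothetical OOM path):
 * record the event against the failing memcg; the loop above propagates
 * it up the hierarchy and wakes "memory.events" watchers.
 *
 *	memcg_memory_event(memcg, MEMCG_OOM);
 *	if (kill_whole_group)	// hypothetical flag
 *		memcg_memory_event(memcg, MEMCG_OOM_GROUP_KILL);
 */
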
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_delayed(void)
{
}

eedc4e5a RG |
1510 | static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, |
1511 | enum node_stat_item idx, int val) | |
1512 | { | |
1513 | } | |
1514 | ||
da3ceeff | 1515 | static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
ec9f0238 RG |
1516 | int val) |
1517 | { | |
1518 | struct page *page = virt_to_head_page(p); | |
1519 | ||
1520 | __mod_node_page_state(page_pgdat(page), idx, val); | |
1521 | } | |
1522 | ||
da3ceeff | 1523 | static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, |
991e7673 SB |
1524 | int val) |
1525 | { | |
1526 | struct page *page = virt_to_head_page(p); | |
1527 | ||
1528 | mod_node_page_state(page_pgdat(page), idx, val); | |
1529 | } | |
1530 | ||
2262185c RG |
1531 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
1532 | enum vm_event_item idx, | |
1533 | unsigned long count) | |
1534 | { | |
1535 | } | |
1536 | ||
9851ac13 KT |
1537 | static inline void __count_memcg_events(struct mem_cgroup *memcg, |
1538 | enum vm_event_item idx, | |
1539 | unsigned long count) | |
1540 | { | |
1541 | } | |
1542 | ||
2262185c | 1543 | static inline void count_memcg_page_event(struct page *page, |
04fecbf5 | 1544 | int idx) |
2262185c RG |
1545 | { |
1546 | } | |
1547 | ||
64daa5d8 MWO |
1548 | static inline void count_memcg_folio_events(struct folio *folio, |
1549 | enum vm_event_item idx, unsigned long nr) | |
1550 | { | |
1551 | } | |
1552 | ||
456f998e | 1553 | static inline |
2262185c | 1554 | void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) |
456f998e YH |
1555 | { |
1556 | } | |
6168d0da | 1557 | |
2d146aa3 JW |
1558 | static inline void split_page_memcg(struct page *head, unsigned int nr) |
1559 | { | |
1560 | } | |
1561 | ||
1562 | static inline | |
1563 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | |
1564 | gfp_t gfp_mask, | |
1565 | unsigned long *total_scanned) | |
6168d0da | 1566 | { |
2d146aa3 | 1567 | return 0; |
6168d0da | 1568 | } |
c255a458 | 1569 | #endif /* CONFIG_MEMCG */ |
78fb7466 | 1570 | |
da3ceeff | 1571 | static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1572 | { |
da3ceeff | 1573 | __mod_lruvec_kmem_state(p, idx, 1); |
ec9f0238 RG |
1574 | } |
1575 | ||
da3ceeff | 1576 | static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) |
ec9f0238 | 1577 | { |
da3ceeff | 1578 | __mod_lruvec_kmem_state(p, idx, -1); |
ec9f0238 RG |
1579 | } |
1580 | ||
7cf111bc JW |
1581 | static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) |
1582 | { | |
1583 | struct mem_cgroup *memcg; | |
1584 | ||
1585 | memcg = lruvec_memcg(lruvec); | |
1586 | if (!memcg) | |
1587 | return NULL; | |
1588 | memcg = parent_mem_cgroup(memcg); | |
1589 | if (!memcg) | |
1590 | return NULL; | |
1591 | return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); | |
1592 | } | |
1593 | ||
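/*
 * Illustrative sketch (not part of the original header): walking up a
 * lruvec's cgroup ancestry with parent_lruvec(), e.g. to propagate a
 * value to every ancestor the way reclaim cost accounting does.
 * "do_something" is a hypothetical callback.
 *
 *	struct lruvec *pos;
 *
 *	for (pos = lruvec; pos; pos = parent_lruvec(pos))
 *		do_something(pos);
 */
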
static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't take the lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't take the lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

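/*
 * Illustrative sketch (not part of the original header): the relock
 * helpers let a caller walk a batch of folios while holding each lruvec
 * lock only for runs of folios that share it. "folios" is a hypothetical
 * list threaded through folio->lru.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct folio *folio, *next;
 *
 *	list_for_each_entry_safe(folio, next, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... manipulate folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
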
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

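/*
 * Illustrative sketch (not part of the original header): a protocol can
 * combine its global pressure signal with the per-memcg one, roughly the
 * way TCP does. "global_memory_pressure" stands in for a protocol-private
 * flag; only sockets with a populated sk->sk_memcg are affected.
 *
 *	static inline bool sk_under_pressure(const struct sock *sk)
 *	{
 *		if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *		    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *			return true;
 *		return READ_ONCE(global_memory_pressure);
 *	}
 */
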
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

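/*
 * Illustrative sketch (not part of the original header): charging a
 * byte-sized allocation to the current task's object cgroup. "size" is
 * hypothetical; get_obj_cgroup_from_current() returns a reference that
 * must be dropped with obj_cgroup_put().
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *			... charge rejected, bail out ...
 *		...
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 */
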
extern struct static_key_false memcg_kmem_enabled_key;

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

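/*
 * Illustrative sketch (not part of the original header): pairing the
 * wrappers above around a raw page allocation, roughly what the page
 * allocator does internally for __GFP_ACCOUNT requests.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, order);
 *
 *	if (page && memcg_kmem_charge_page(page, GFP_KERNEL, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */
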
/*
 * A helper for reading a memcg's kmem_id, which is used to look up the
 * corresponding per-memcg LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_kmem_disabled())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

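/*
 * Illustrative sketch (not part of the original header): attributing a VM
 * event to the cgroup that owns an object. "MY_EVENT" stands in for a real
 * enum vm_event_item value; the helper resolves objcg to its memcg under
 * RCU, so the caller only needs a valid obj_cgroup reference.
 *
 *	count_objcg_event(objcg, MY_EVENT);
 */
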
/**
 * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
 * @p: pointer to object from which memcg should be extracted. It can be NULL.
 *
 * Retrieves the memory cgroup into which the memory of the pointed kernel
 * object is accounted. If a memcg is found, its reference is taken.
 * NULL is returned if the object is uncharged, if no proper memcg can be
 * found, or if the memory controller is disabled.
 *
 * Return: valid memcg pointer with taken reference or NULL.
 */
static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_obj(p);
	} while (memcg && !css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

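/*
 * Illustrative sketch (not part of the original header): the reference
 * returned by get_mem_cgroup_from_obj() is dropped with css_put().
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_obj(p);
 *
 *	if (memcg) {
 *		... use memcg ...
 *		css_put(&memcg->css);
 *	}
 */
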
/**
 * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
 * @memcg: pointer to a valid memory cgroup or NULL.
 *
 * If the passed argument is not NULL, it is returned without any additional
 * checks or changes. Otherwise, root_mem_cgroup is returned.
 *
 * NOTE: root_mem_cgroup can be NULL during early boot.
 */
static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
{
	return memcg ? memcg : root_mem_cgroup;
}
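
/*
 * Illustrative sketch (not part of the original header): falling back to
 * the root cgroup when an object has no specific owner. Note that, unlike
 * get_mem_cgroup_from_obj(), no reference is taken here.
 *
 *	struct mem_cgroup *memcg = mem_cgroup_or_root(mem_cgroup_from_obj(p));
 */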
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif
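
/*
 * Illustrative sketch (not part of the original header): how a zswap-style
 * store path might use the hooks above, with "compressed_len" hypothetical.
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_page(page);
 *
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		... reject the store ...
 *	...
 *	if (objcg)
 *		obj_cgroup_charge_zswap(objcg, compressed_len);
 *
 * with a matching obj_cgroup_uncharge_zswap(objcg, compressed_len) when
 * the compressed entry is freed.
 */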

#endif /* _LINUX_MEMCONTROL_H */