1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* memcontrol.h - Memory Controller
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
11 #ifndef _LINUX_MEMCONTROL_H
12 #define _LINUX_MEMCONTROL_H
13 #include <linux/cgroup.h>
14 #include <linux/vm_event_item.h>
15 #include <linux/hardirq.h>
16 #include <linux/jump_label.h>
17 #include <linux/kernel.h>
18 #include <linux/page_counter.h>
19 #include <linux/vmpressure.h>
20 #include <linux/eventfd.h>
22 #include <linux/vmstat.h>
23 #include <linux/writeback.h>
24 #include <linux/page-flags.h>
25 #include <linux/shrinker.h>
33 /* Cgroup-specific page state, on top of universal node page state */
34 enum memcg_stat_item {
35 	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
45 enum memcg_memory_event {
55 	MEMCG_NR_MEMORY_EVENTS,
58 struct mem_cgroup_reclaim_cookie {
65 #define MEM_CGROUP_ID_SHIFT 16
67 struct mem_cgroup_id {
72 struct memcg_vmstats_percpu;
73 struct memcg1_events_percpu;
75 struct lruvec_stats_percpu;
78 struct mem_cgroup_reclaim_iter {
79 	struct mem_cgroup *position;
80 /* scan generation, increased every round-trip */
85  * per-node information in the memory controller.
87 struct mem_cgroup_per_node {
88 	/* Keep the read-only fields at the start */
89 	struct mem_cgroup *memcg;	/* Back pointer, we cannot use container_of */
92 	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
93 	struct lruvec_stats *lruvec_stats;
94 	struct shrinker_info __rcu *shrinker_info;
96 #ifdef CONFIG_MEMCG_V1
98 	 * Memcg-v1-only fields sit in the middle, as a buffer between the
99 	 * read-mostly fields and the often-updated fields, to avoid false sharing.
100 	 * If the v1 fields are not present, explicit padding is needed instead.
103 	struct rb_node tree_node;		/* RB tree node */
104 	unsigned long usage_in_excess;		/* Set to the value by which the soft limit is exceeded */
108 	CACHELINE_PADDING(_pad1_);
111 /* Fields which get updated often at the end. */
112 	struct lruvec lruvec;
113 	CACHELINE_PADDING(_pad2_);
114 	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
115 	struct mem_cgroup_reclaim_iter iter;
117 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
118 	/* slab stats for NMI context */
119 	atomic_t slab_reclaimable;
120 	atomic_t slab_unreclaimable;
124 struct mem_cgroup_threshold {
125 	struct eventfd_ctx *eventfd;
126 	unsigned long threshold;
130 struct mem_cgroup_threshold_ary {
131 	/* Index of the threshold just below or equal to the current usage */
132 	int current_threshold;
133 	/* Size of entries[] */
135 	/* Array of thresholds */
136 	struct mem_cgroup_threshold entries[] __counted_by(size);
139 struct mem_cgroup_thresholds {
140 	/* Primary thresholds array */
141 	struct mem_cgroup_threshold_ary *primary;
143 	 * Spare threshold array.
144 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
145 	 * It must be able to store at least primary->size - 1 entries.
147 	struct mem_cgroup_threshold_ary *spare;
151 * Remember four most recent foreign writebacks with dirty pages in this
152 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
153 * one in a given round, we're likely to catch it later if it keeps
154 * foreign-dirtying, so a fairly low count should be enough.
156 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
158 #define MEMCG_CGWB_FRN_CNT 4
160 struct memcg_cgwb_frn {
161 	u64 bdi_id;			/* bdi->id of the foreign inode */
162 	int memcg_id;			/* memcg->css.id of foreign inode */
163 	u64 at;				/* jiffies_64 at the time of dirtying */
164 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
168 * Bucket for arbitrarily byte-sized objects charged to a memory
169 * cgroup. The bucket can be reparented in one piece when the cgroup
170 * is destroyed, without having to round up the individual references
171 * of all live memory objects in the wild.
174 	struct percpu_ref refcnt;
175 	struct mem_cgroup *memcg;
176 	atomic_t nr_charged_bytes;
178 	struct list_head list;		/* protected by objcg_lock */
184 * The memory controller data structure. The memory controller controls both
185 * page cache and RSS per cgroup. We would eventually like to provide
186 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
187 * to help the administrator determine what knobs to tune.
190 	struct cgroup_subsys_state css;
192 	/* Private memcg ID. Used to ID objects that outlive the cgroup */
193 	struct mem_cgroup_id id;
195 	/* Accounted resources */
196 	struct page_counter memory;		/* Both v1 & v2 */
199 	struct page_counter swap;		/* v2 only */
200 	struct page_counter memsw;		/* v1 only */
203 	/* registered local peak watchers */
204 	struct list_head memory_peaks;
205 	struct list_head swap_peaks;
206 	spinlock_t peaks_lock;
208 	/* Range enforcement for interrupt charges */
209 	struct work_struct high_work;
212 	unsigned long zswap_max;
215 	 * Prevent pages from this memcg from being written back from zswap to
216 	 * swap, and from being swapped out on zswap store failures.
218 	bool zswap_writeback;
221 	/* vmpressure notifications */
222 	struct vmpressure vmpressure;
225 	 * Should the OOM killer kill all tasks belonging to this cgroup, instead of just one?
231 	/* memory.events and memory.events.local */
232 	struct cgroup_file events_file;
233 	struct cgroup_file events_local_file;
235 	/* handle for "memory.swap.events" */
236 	struct cgroup_file swap_events_file;
239 	struct memcg_vmstats *vmstats;
242 	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
243 	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
245 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
246 	/* MEMCG_KMEM for NMI context */
250 	 * Hint of reclaim pressure for socket memory management. Note
251 	 * that this indicator should NOT be used in legacy cgroup mode
252 	 * where socket memory is accounted/charged separately.
254 	unsigned long socket_pressure;
258 	 * memcg->objcg is wiped out as part of the objcg reparenting
259 	 * process. memcg->orig_objcg preserves a pointer (and a reference)
260 	 * to the original objcg until the end of the memcg's life.
262 	struct obj_cgroup __rcu *objcg;
263 	struct obj_cgroup *orig_objcg;
264 	/* list of inherited objcgs, protected by objcg_lock */
265 	struct list_head objcg_list;
267 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
269 #ifdef CONFIG_CGROUP_WRITEBACK
270 	struct list_head cgwb_list;
271 	struct wb_domain cgwb_domain;
272 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
275 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
276 	struct deferred_split deferred_split_queue;
279 #ifdef CONFIG_LRU_GEN_WALKS_MMU
280 	/* per-memcg mm_struct list */
281 	struct lru_gen_mm_list mm_list;
284 #ifdef CONFIG_MEMCG_V1
285 	/* Legacy consumer-oriented counters */
286 	struct page_counter kmem;		/* v1 only */
287 	struct page_counter tcpmem;		/* v1 only */
289 	struct memcg1_events_percpu __percpu *events_percpu;
291 	unsigned long soft_limit;
293 	/* protected by memcg_oom_lock */
297 	/* OOM-Killer disable */
298 	int oom_kill_disable;
300 	/* protect arrays of thresholds */
301 	struct mutex thresholds_lock;
303 	/* thresholds for memory usage. RCU-protected */
304 	struct mem_cgroup_thresholds thresholds;
306 	/* thresholds for mem+swap usage. RCU-protected */
307 	struct mem_cgroup_thresholds memsw_thresholds;
309 	/* For oom notifier event fd */
310 	struct list_head oom_notify;
312 	/* Legacy tcp memory accounting */
316 	/* List of events which userspace wants to receive */
317 	struct list_head event_list;
318 	spinlock_t event_list_lock;
319 #endif /* CONFIG_MEMCG_V1 */
321 	struct mem_cgroup_per_node *nodeinfo[];
325  * Size of the first charge trial.
326  * TODO: it may be necessary to use larger values on big machines, or to size this dynamically based on the workload.
329 #define MEMCG_CHARGE_BATCH	64U
331 extern struct mem_cgroup *root_mem_cgroup;
333 enum page_memcg_data_flags {
334 	/* page->memcg_data is a pointer to a slabobj_ext vector */
335 	MEMCG_DATA_OBJEXTS = (1UL << 0),
336 	/* page has been accounted as a non-slab kernel page */
337 	MEMCG_DATA_KMEM = (1UL << 1),
338 	/* the next bit after the last actual flag */
339 	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
342 #define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
344 #else /* CONFIG_MEMCG */
346 #define __FIRST_OBJEXT_FLAG (1UL << 0)
348 #endif /* CONFIG_MEMCG */
351 	/* slabobj_ext vector failed to allocate */
352 	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
353 	/* the next bit after the last actual flag */
354 	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
357 #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
361 static inline bool folio_memcg_kmem(struct folio *folio);
364  * After initialization, objcg->memcg always points at a valid memcg,
365  * but it can be atomically swapped to the parent memcg.
367  * The caller must ensure that the returned memcg won't be released.
369 static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
371 	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
372 	return READ_ONCE(objcg->memcg);
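/*
 * Illustrative sketch (not part of this header): a caller that only needs
 * the memcg for the duration of an RCU read-side critical section can do:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg, e.g. count_memcg_events(memcg, idx, 1) ...
 *	rcu_read_unlock();
 *
 * To keep using the memcg after rcu_read_unlock(), take a css reference
 * first, e.g. via get_mem_cgroup_from_objcg() below.
 */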
376  * __folio_memcg - Get the memory cgroup associated with a non-kmem folio.
377  * @folio: Pointer to the folio.
379  * Returns a pointer to the memory cgroup associated with the folio,
380  * or NULL. This function assumes that the folio is known to have a
381  * proper memory cgroup pointer. It's not safe to call this function
382  * against some types of folios, e.g. slab folios or ex-slab folios.
385 static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
387 	unsigned long memcg_data = folio->memcg_data;
389 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
390 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
391 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
393 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
397  * __folio_objcg - Get the object cgroup associated with a kmem folio.
398  * @folio: Pointer to the folio.
400  * Returns a pointer to the object cgroup associated with the folio,
401  * or NULL. This function assumes that the folio is known to have a
402  * proper object cgroup pointer. It's not safe to call this function
403  * against some types of folios, e.g. slab folios or ex-slab folios.
406 static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
408 	unsigned long memcg_data = folio->memcg_data;
410 	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
411 	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
412 	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
414 	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
418  * folio_memcg - Get the memory cgroup associated with a folio.
419  * @folio: Pointer to the folio.
421  * Returns a pointer to the memory cgroup associated with the folio,
422  * or NULL. This function assumes that the folio is known to have a
423  * proper memory cgroup pointer. It's not safe to call this function
424  * against some types of folios, e.g. slab folios or ex-slab folios.
426  * For a non-kmem folio, any of the following ensures the folio and memcg
427  * binding remains stable:
431  * - exclusive reference
433  * For a kmem folio, the caller should hold an RCU read lock to keep the
434  * memcg associated with the folio from being released.
436 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
438 	if (folio_memcg_kmem(folio))
439 		return obj_cgroup_memcg(__folio_objcg(folio));
440 	return __folio_memcg(folio);
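/*
 * Illustrative sketch (not part of this header): with one of the bindings
 * above held (here the folio lock), the memcg can be read and used directly:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		count_memcg_events(memcg, PGACTIVATE, 1);
 *	folio_unlock(folio);
 */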
444  * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
445  * @folio: Pointer to the folio.
447  * Returns true if the folio is charged to a memory cgroup, otherwise false.
449 static inline bool folio_memcg_charged(struct folio *folio)
451 	return folio->memcg_data != 0;
455  * folio_memcg_check - Get the memory cgroup associated with a folio.
456  * @folio: Pointer to the folio.
458  * Returns a pointer to the memory cgroup associated with the folio,
459  * or NULL. Unlike folio_memcg(), this function can take any folio
460  * as an argument. It has to be used in cases when it's not known if a folio
461  * has an associated memory cgroup pointer, an object cgroups vector or
462  * an objcg pointer.
464  * For a non-kmem folio, any of the following ensures the folio and memcg
465  * binding remains stable:
469  * - exclusive reference
471  * For a kmem folio, the caller should hold an RCU read lock to keep the
472  * memcg associated with the folio from being released.
474 static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
477 	 * Because folio->memcg_data might be changed asynchronously
478 	 * for slabs, READ_ONCE() should be used here.
480 	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
482 	if (memcg_data & MEMCG_DATA_OBJEXTS)
483 		return NULL;
485 	if (memcg_data & MEMCG_DATA_KMEM) {
486 		struct obj_cgroup *objcg;
488 		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
489 		return obj_cgroup_memcg(objcg);
492 	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
495 static inline struct mem_cgroup *page_memcg_check(struct page *page)
499 	return folio_memcg_check((struct folio *)page);
502 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
504 	struct mem_cgroup *memcg;
508 	memcg = obj_cgroup_memcg(objcg);
509 	if (unlikely(!css_tryget(&memcg->css)))
517  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
518  * @folio: Pointer to the folio.
520  * Checks if the folio has the MemcgKmem flag set. The caller must ensure
521  * that the folio has an associated memory cgroup. It's not safe to call
522  * this function against some types of folios, e.g. slab folios.
524 static inline bool folio_memcg_kmem(struct folio *folio)
526 	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
527 	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
528 	return folio->memcg_data & MEMCG_DATA_KMEM;
531 static inline bool PageMemcgKmem(struct page *page)
533 	return folio_memcg_kmem(page_folio(page));
536 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
538 	return (memcg == root_mem_cgroup);
541 static inline bool mem_cgroup_disabled(void)
543 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
546 static inline void mem_cgroup_protection(struct mem_cgroup *root,
547 					 struct mem_cgroup *memcg,
553 	if (mem_cgroup_disabled())
557 	 * There is no reclaim protection applied to a targeted reclaim.
558 	 * We are special casing this specific case here because
559 	 * mem_cgroup_calculate_protection is not robust enough to keep
560 	 * the protection invariant for calculated effective values for
561 	 * parallel reclaimers with different reclaim targets. This is
562 	 * especially a problem for tail memcgs (as they have pages on LRU)
563 	 * which would want to have effective values 0 for targeted reclaim
564 	 * but a different value for external reclaim.
567 	 * Let's have global and A's reclaim in parallel:
569 	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
571 	 *    | C (low = 1G, usage = 2.5G)
572 	 *    B (low = 1G, usage = 0.5G)
574 	 * For the global reclaim
576 	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
577 	 *  C.elow = min(C.usage, C.low)
579 	 * With the effective values resetting we have A reclaim
584 	 * If the global reclaim races with A's reclaim then
585 	 *  B.elow = C.elow = 0 because children_low_usage > A.elow
586 	 * is possible and reclaiming B would be violating the protection.
592 	*min = READ_ONCE(memcg->memory.emin);
593 	*low = READ_ONCE(memcg->memory.elow);
596 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
597 				     struct mem_cgroup *memcg);
599 static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
600 					  struct mem_cgroup *memcg)
603 	 * The root memcg doesn't account charges, and doesn't support
604 	 * protection. The target memcg's protection is ignored, see
605 	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
607 	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
611 static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
612 					struct mem_cgroup *memcg)
614 	if (mem_cgroup_unprotected(target, memcg))
617 	return READ_ONCE(memcg->memory.elow) >=
618 		page_counter_read(&memcg->memory);
621 static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
622 					struct mem_cgroup *memcg)
624 	if (mem_cgroup_unprotected(target, memcg))
627 	return READ_ONCE(memcg->memory.emin) >=
628 		page_counter_read(&memcg->memory);
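/*
 * Illustrative sketch (not part of this header), loosely modelled on how
 * reclaim honours protection; the surrounding reclaim loop and the
 * memcg_low_skipped bookkeeping are assumptions for the example:
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(target_memcg, memcg))
 *		continue;			// hard protection, skip entirely
 *	else if (mem_cgroup_below_low(target_memcg, memcg))
 *		memcg_low_skipped = true;	// best-effort protection
 */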
631 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
634  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
635  * @folio: Folio to charge.
636  * @mm: mm context of the allocating task.
637  * @gfp: Reclaim mode.
639  * Try to charge @folio to the memcg that @mm belongs to, reclaiming
640  * pages according to @gfp if necessary. If @mm is NULL, try to
641  * charge to the active memcg.
643  * Do not use this for folios allocated for swapin.
645  * Return: 0 on success. Otherwise, an error code is returned.
647 static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
648 				    gfp_t gfp)
650 	if (mem_cgroup_disabled())
651 		return 0;
652 	return __mem_cgroup_charge(folio, mm, gfp);
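/*
 * Illustrative sketch (not part of this header): charging a freshly
 * allocated folio before inserting it into the page cache. The surrounding
 * error handling is an assumption for the example:
 *
 *	folio = filemap_alloc_folio(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, current->mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *
 * The charge travels with the folio and is released by
 * mem_cgroup_uncharge() when the folio is finally freed.
 */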
655 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
657 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
658 				   gfp_t gfp, swp_entry_t entry);
660 void __mem_cgroup_uncharge(struct folio *folio);
663  * mem_cgroup_uncharge - Uncharge a folio.
664  * @folio: Folio to uncharge.
666  * Uncharge a folio previously charged with mem_cgroup_charge().
668 static inline void mem_cgroup_uncharge(struct folio *folio)
670 	if (mem_cgroup_disabled())
671 		return;
672 	__mem_cgroup_uncharge(folio);
675 void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
676 static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
678 	if (mem_cgroup_disabled())
679 		return;
680 	__mem_cgroup_uncharge_folios(folios);
683 void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
684 void mem_cgroup_migrate(struct folio *old, struct folio *new);
687  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
688  * @memcg: memcg of the wanted lruvec
689  * @pgdat: pglist_data
691  * Returns the lru list vector holding pages for a given @memcg &
692  * @pgdat combination. This can be the node lruvec, if the memory
693  * controller is disabled.
695 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
696 					       struct pglist_data *pgdat)
698 	struct mem_cgroup_per_node *mz;
699 	struct lruvec *lruvec;
701 	if (mem_cgroup_disabled()) {
702 		lruvec = &pgdat->__lruvec;
706 	if (!memcg)
707 		memcg = root_mem_cgroup;
709 	mz = memcg->nodeinfo[pgdat->node_id];
710 	lruvec = &mz->lruvec;
713 	 * Since a node can be onlined after the mem_cgroup was created,
714 	 * we have to be prepared to initialize lruvec->pgdat here;
715 	 * and if offlined then reonlined, we need to reinitialize it.
717 	if (unlikely(lruvec->pgdat != pgdat))
718 		lruvec->pgdat = pgdat;
723  * folio_lruvec - return lruvec for isolating/putting an LRU folio
724  * @folio: Pointer to the folio.
726  * This function relies on folio->mem_cgroup being stable.
728 static inline struct lruvec *folio_lruvec(struct folio *folio)
730 	struct mem_cgroup *memcg = folio_memcg(folio);
732 	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
733 	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
736 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
738 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
740 struct mem_cgroup *get_mem_cgroup_from_current(void);
742 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
744 struct lruvec *folio_lruvec_lock(struct folio *folio);
745 struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
746 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
747 					  unsigned long *flags);
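/*
 * Illustrative sketch (not part of this header): the lock helpers above
 * pair with the unlock_page_lruvec*() helpers further down, e.g.:
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = folio_lruvec_lock_irq(folio);
 *	lruvec_del_folio(lruvec, folio);	// or other LRU manipulation
 *	unlock_page_lruvec_irq(lruvec);
 */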
749 #ifdef CONFIG_DEBUG_VM
750 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
753 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
759 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
760 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
763 static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
765 	return percpu_ref_tryget(&objcg->refcnt);
768 static inline void obj_cgroup_get(struct obj_cgroup *objcg)
770 	percpu_ref_get(&objcg->refcnt);
773 static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
774 					unsigned long nr)
776 	percpu_ref_get_many(&objcg->refcnt, nr);
779 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
782 	percpu_ref_put(&objcg->refcnt);
785 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
787 	return !memcg || css_tryget(&memcg->css);
790 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
792 	return !memcg || css_tryget_online(&memcg->css);
795 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
797 	if (memcg)
798 		css_put(&memcg->css);
801 #define mem_cgroup_from_counter(counter, member) \
802 container_of(counter, struct mem_cgroup, member)
804 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
806 				   struct mem_cgroup_reclaim_cookie *);
807 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
808 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
809 			   int (*)(struct task_struct *, void *), void *arg);
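/*
 * Illustrative sketch (not part of this header): mem_cgroup_iter() is
 * normally used to walk a memcg hierarchy, with mem_cgroup_iter_break()
 * for early termination. should_stop() is a hypothetical predicate:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */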
811 static inline unsigned short mem_cgroup_id(struct mem_cgroup
*memcg
)
813 if (mem_cgroup_disabled())
818 struct mem_cgroup
*mem_cgroup_from_id(unsigned short id
);
820 #ifdef CONFIG_SHRINKER_DEBUG
821 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
823 	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
826 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
829 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
831 	return mem_cgroup_from_css(seq_css(m));
834 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
836 	struct mem_cgroup_per_node *mz;
838 	if (mem_cgroup_disabled())
841 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
846  * parent_mem_cgroup - find the accounting parent of a memcg
847  * @memcg: memcg whose parent to find
849  * Returns the parent memcg, or NULL if this is the root.
851 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
853 	return mem_cgroup_from_css(memcg->css.parent);
856 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
857 					    struct mem_cgroup *root)
861 	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
864 static inline bool mm_match_cgroup(struct mm_struct *mm,
865 				   struct mem_cgroup *memcg)
867 	struct mem_cgroup *task_memcg;
871 	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
873 		match = mem_cgroup_is_descendant(task_memcg, memcg);
878 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
879 ino_t page_cgroup_ino(struct page *page);
881 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
883 	if (mem_cgroup_disabled())
885 	return !!(memcg->css.flags & CSS_ONLINE);
888 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
889 				int zid, int nr_pages);
892 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
893 					   enum lru_list lru, int zone_idx)
895 	struct mem_cgroup_per_node *mz;
897 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
898 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
901 void mem_cgroup_handle_over_high(gfp_t gfp_mask);
903 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
905 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
907 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
908 				  struct task_struct *p);
910 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
912 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
913 					    struct mem_cgroup *oom_domain);
914 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
916 /* idx can be of type enum memcg_stat_item or node_stat_item */
917 void mod_memcg_state(struct mem_cgroup *memcg,
918 		     enum memcg_stat_item idx, int val);
920 static inline void mod_memcg_page_state(struct page *page,
921 					 enum memcg_stat_item idx, int val)
923 	struct mem_cgroup *memcg;
925 	if (mem_cgroup_disabled())
929 	memcg = folio_memcg(page_folio(page));
931 		mod_memcg_state(memcg, idx, val);
935 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
936 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
937 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
938 				      enum node_stat_item idx);
940 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
941 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
943 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
945 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
950 	local_irq_save(flags);
951 	__mod_lruvec_kmem_state(p, idx, val);
952 	local_irq_restore(flags);
955 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
956 			unsigned long count);
958 static inline void count_memcg_folio_events(struct folio *folio,
959 		enum vm_event_item idx, unsigned long nr)
961 	struct mem_cgroup *memcg = folio_memcg(folio);
964 		count_memcg_events(memcg, idx, nr);
967 static inline void count_memcg_events_mm(struct mm_struct *mm,
968 					enum vm_event_item idx, unsigned long count)
970 	struct mem_cgroup *memcg;
972 	if (mem_cgroup_disabled())
976 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
978 		count_memcg_events(memcg, idx, count);
982 static inline void count_memcg_event_mm(struct mm_struct *mm,
983 					enum vm_event_item idx)
985 	count_memcg_events_mm(mm, idx, 1);
988 static inline void memcg_memory_event(struct mem_cgroup *memcg,
989 				      enum memcg_memory_event event)
991 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
992 			  event == MEMCG_SWAP_FAIL;
994 	atomic_long_inc(&memcg->memory_events_local[event]);
996 	cgroup_file_notify(&memcg->events_local_file);
999 	atomic_long_inc(&memcg->memory_events[event]);
1001 		cgroup_file_notify(&memcg->swap_events_file);
1003 		cgroup_file_notify(&memcg->events_file);
1005 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1007 	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1009 	} while ((memcg = parent_mem_cgroup(memcg)) &&
1010 		 !mem_cgroup_is_root(memcg));
1013 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1014 					 enum memcg_memory_event event)
1016 	struct mem_cgroup *memcg;
1018 	if (mem_cgroup_disabled())
1022 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1024 		memcg_memory_event(memcg, event);
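/*
 * Illustrative sketch (not part of this header): callers record
 * memory.events from the relevant paths, for example on OOM:
 *
 *	memcg_memory_event(memcg, MEMCG_OOM);
 *	memcg_memory_event_mm(current->mm, MEMCG_OOM);
 */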
1028 void split_page_memcg(struct page *first, unsigned order);
1029 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
1030 			    unsigned new_order);
1032 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
1034 	struct mem_cgroup *memcg;
1037 	if (mem_cgroup_disabled())
1041 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1043 		memcg = root_mem_cgroup;
1044 	id = cgroup_id(memcg->css.cgroup);
1049 extern int mem_cgroup_init(void);
1050 #else /* CONFIG_MEMCG */
1052 #define MEM_CGROUP_ID_SHIFT 0
1054 static inline struct mem_cgroup
*folio_memcg(struct folio
*folio
)
1059 static inline bool folio_memcg_charged(struct folio
*folio
)
1064 static inline struct mem_cgroup
*folio_memcg_check(struct folio
*folio
)
1069 static inline struct mem_cgroup
*page_memcg_check(struct page
*page
)
1074 static inline struct mem_cgroup
*get_mem_cgroup_from_objcg(struct obj_cgroup
*objcg
)
1079 static inline bool folio_memcg_kmem(struct folio
*folio
)
1084 static inline bool PageMemcgKmem(struct page
*page
)
1089 static inline bool mem_cgroup_is_root(struct mem_cgroup
*memcg
)
1094 static inline bool mem_cgroup_disabled(void)
1099 static inline void memcg_memory_event(struct mem_cgroup
*memcg
,
1100 enum memcg_memory_event event
)
1104 static inline void memcg_memory_event_mm(struct mm_struct
*mm
,
1105 enum memcg_memory_event event
)
1109 static inline void mem_cgroup_protection(struct mem_cgroup
*root
,
1110 struct mem_cgroup
*memcg
,
1117 static inline void mem_cgroup_calculate_protection(struct mem_cgroup
*root
,
1118 struct mem_cgroup
*memcg
)
1122 static inline bool mem_cgroup_unprotected(struct mem_cgroup
*target
,
1123 struct mem_cgroup
*memcg
)
1127 static inline bool mem_cgroup_below_low(struct mem_cgroup
*target
,
1128 struct mem_cgroup
*memcg
)
1133 static inline bool mem_cgroup_below_min(struct mem_cgroup
*target
,
1134 struct mem_cgroup
*memcg
)
1139 static inline int mem_cgroup_charge(struct folio
*folio
,
1140 struct mm_struct
*mm
, gfp_t gfp
)
1145 static inline int mem_cgroup_charge_hugetlb(struct folio
* folio
, gfp_t gfp
)
1150 static inline int mem_cgroup_swapin_charge_folio(struct folio
*folio
,
1151 struct mm_struct
*mm
, gfp_t gfp
, swp_entry_t entry
)
1156 static inline void mem_cgroup_uncharge(struct folio
*folio
)
1160 static inline void mem_cgroup_uncharge_folios(struct folio_batch
*folios
)
1164 static inline void mem_cgroup_replace_folio(struct folio
*old
,
1169 static inline void mem_cgroup_migrate(struct folio
*old
, struct folio
*new)
1173 static inline struct lruvec
*mem_cgroup_lruvec(struct mem_cgroup
*memcg
,
1174 struct pglist_data
*pgdat
)
1176 return &pgdat
->__lruvec
;
1179 static inline struct lruvec
*folio_lruvec(struct folio
*folio
)
1181 struct pglist_data
*pgdat
= folio_pgdat(folio
);
1182 return &pgdat
->__lruvec
;
1186 void lruvec_memcg_debug(struct lruvec
*lruvec
, struct folio
*folio
)
1190 static inline struct mem_cgroup
*parent_mem_cgroup(struct mem_cgroup
*memcg
)
1195 static inline bool mm_match_cgroup(struct mm_struct
*mm
,
1196 struct mem_cgroup
*memcg
)
1201 static inline struct mem_cgroup
*get_mem_cgroup_from_mm(struct mm_struct
*mm
)
1206 static inline struct mem_cgroup
*get_mem_cgroup_from_current(void)
1211 static inline struct mem_cgroup
*get_mem_cgroup_from_folio(struct folio
*folio
)
1217 struct mem_cgroup
*mem_cgroup_from_css(struct cgroup_subsys_state
*css
)
1222 static inline void obj_cgroup_get(struct obj_cgroup
*objcg
)
1226 static inline void obj_cgroup_put(struct obj_cgroup
*objcg
)
1230 static inline bool mem_cgroup_tryget(struct mem_cgroup
*memcg
)
1235 static inline bool mem_cgroup_tryget_online(struct mem_cgroup
*memcg
)
1240 static inline void mem_cgroup_put(struct mem_cgroup
*memcg
)
1244 static inline struct lruvec
*folio_lruvec_lock(struct folio
*folio
)
1246 struct pglist_data
*pgdat
= folio_pgdat(folio
);
1248 spin_lock(&pgdat
->__lruvec
.lru_lock
);
1249 return &pgdat
->__lruvec
;
1252 static inline struct lruvec
*folio_lruvec_lock_irq(struct folio
*folio
)
1254 struct pglist_data
*pgdat
= folio_pgdat(folio
);
1256 spin_lock_irq(&pgdat
->__lruvec
.lru_lock
);
1257 return &pgdat
->__lruvec
;
1260 static inline struct lruvec
*folio_lruvec_lock_irqsave(struct folio
*folio
,
1261 unsigned long *flagsp
)
1263 struct pglist_data
*pgdat
= folio_pgdat(folio
);
1265 spin_lock_irqsave(&pgdat
->__lruvec
.lru_lock
, *flagsp
);
1266 return &pgdat
->__lruvec
;
1269 static inline struct mem_cgroup
*
1270 mem_cgroup_iter(struct mem_cgroup
*root
,
1271 struct mem_cgroup
*prev
,
1272 struct mem_cgroup_reclaim_cookie
*reclaim
)
1277 static inline void mem_cgroup_iter_break(struct mem_cgroup
*root
,
1278 struct mem_cgroup
*prev
)
1282 static inline void mem_cgroup_scan_tasks(struct mem_cgroup
*memcg
,
1283 int (*fn
)(struct task_struct
*, void *), void *arg
)
1287 static inline unsigned short mem_cgroup_id(struct mem_cgroup
*memcg
)
1292 static inline struct mem_cgroup
*mem_cgroup_from_id(unsigned short id
)
1295 /* XXX: This should always return root_mem_cgroup */
1299 #ifdef CONFIG_SHRINKER_DEBUG
1300 static inline unsigned long mem_cgroup_ino(struct mem_cgroup
*memcg
)
1305 static inline struct mem_cgroup
*mem_cgroup_get_from_ino(unsigned long ino
)
1311 static inline struct mem_cgroup
*mem_cgroup_from_seq(struct seq_file
*m
)
1316 static inline struct mem_cgroup
*lruvec_memcg(struct lruvec
*lruvec
)
1321 static inline bool mem_cgroup_online(struct mem_cgroup
*memcg
)
1327 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec
*lruvec
,
1328 enum lru_list lru
, int zone_idx
)
1333 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup
*memcg
)
1338 static inline unsigned long mem_cgroup_size(struct mem_cgroup
*memcg
)
1344 mem_cgroup_print_oom_context(struct mem_cgroup
*memcg
, struct task_struct
*p
)
1349 mem_cgroup_print_oom_meminfo(struct mem_cgroup
*memcg
)
1353 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask
)
1357 static inline struct mem_cgroup
*mem_cgroup_get_oom_group(
1358 struct task_struct
*victim
, struct mem_cgroup
*oom_domain
)
1363 static inline void mem_cgroup_print_oom_group(struct mem_cgroup
*memcg
)
1367 static inline void mod_memcg_state(struct mem_cgroup
*memcg
,
1368 enum memcg_stat_item idx
,
1373 static inline void mod_memcg_page_state(struct page
*page
,
1374 enum memcg_stat_item idx
, int val
)
1378 static inline unsigned long memcg_page_state(struct mem_cgroup
*memcg
, int idx
)
1383 static inline unsigned long lruvec_page_state(struct lruvec
*lruvec
,
1384 enum node_stat_item idx
)
1386 return node_page_state(lruvec_pgdat(lruvec
), idx
);
1389 static inline unsigned long lruvec_page_state_local(struct lruvec
*lruvec
,
1390 enum node_stat_item idx
)
1392 return node_page_state(lruvec_pgdat(lruvec
), idx
);
1395 static inline void mem_cgroup_flush_stats(struct mem_cgroup
*memcg
)
1399 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup
*memcg
)
1403 static inline void __mod_lruvec_kmem_state(void *p
, enum node_stat_item idx
,
1406 struct page
*page
= virt_to_head_page(p
);
1408 __mod_node_page_state(page_pgdat(page
), idx
, val
);
1411 static inline void mod_lruvec_kmem_state(void *p
, enum node_stat_item idx
,
1414 struct page
*page
= virt_to_head_page(p
);
1416 mod_node_page_state(page_pgdat(page
), idx
, val
);
1419 static inline void count_memcg_events(struct mem_cgroup
*memcg
,
1420 enum vm_event_item idx
,
1421 unsigned long count
)
1425 static inline void count_memcg_folio_events(struct folio
*folio
,
1426 enum vm_event_item idx
, unsigned long nr
)
1430 static inline void count_memcg_events_mm(struct mm_struct
*mm
,
1431 enum vm_event_item idx
, unsigned long count
)
1436 void count_memcg_event_mm(struct mm_struct
*mm
, enum vm_event_item idx
)
1440 static inline void split_page_memcg(struct page
*first
, unsigned order
)
1444 static inline void folio_split_memcg_refs(struct folio
*folio
,
1445 unsigned old_order
, unsigned new_order
)
1449 static inline u64
cgroup_id_from_mm(struct mm_struct
*mm
)
1454 static inline int mem_cgroup_init(void) { return 0; }
1455 #endif /* CONFIG_MEMCG */
1458 * Extended information for slab objects stored as an array in page->memcg_data
1459 * if MEMCG_DATA_OBJEXTS is set.
1461 struct slabobj_ext {
1463 	struct obj_cgroup *objcg;
1465 #ifdef CONFIG_MEM_ALLOC_PROFILING
1466 	union codetag_ref ref;
1470 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1472 	__mod_lruvec_kmem_state(p, idx, 1);
1475 static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1477 	__mod_lruvec_kmem_state(p, idx, -1);
1480 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1482 	struct mem_cgroup *memcg;
1484 	memcg = lruvec_memcg(lruvec);
1487 	memcg = parent_mem_cgroup(memcg);
1490 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1493 static inline void unlock_page_lruvec(struct lruvec *lruvec)
1495 	spin_unlock(&lruvec->lru_lock);
1498 static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1500 	spin_unlock_irq(&lruvec->lru_lock);
1503 static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1504 						 unsigned long flags)
1506 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1509 /* Test requires a stable folio->memcg binding, see folio_memcg() */
1510 static inline bool folio_matches_lruvec(struct folio *folio,
1511 					struct lruvec *lruvec)
1513 	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1514 	       lruvec_memcg(lruvec) == folio_memcg(folio);
1517 /* Don't lock again if the folio's lruvec is already locked */
1518 static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1519 		struct lruvec *locked_lruvec)
1521 	if (locked_lruvec) {
1522 		if (folio_matches_lruvec(folio, locked_lruvec))
1523 			return locked_lruvec;
1525 		unlock_page_lruvec_irq(locked_lruvec);
1528 	return folio_lruvec_lock_irq(folio);
1531 /* Don't lock again if the folio's lruvec is already locked */
1532 static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1533 		struct lruvec **lruvecp, unsigned long *flags)
1536 	if (folio_matches_lruvec(folio, *lruvecp))
1537 		return;
1539 	unlock_page_lruvec_irqrestore(*lruvecp, *flags);
1542 	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
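/*
 * Illustrative sketch (not part of this header): the relock helpers avoid
 * a drop/retake of lru_lock when consecutive folios in a batch share an
 * lruvec, e.g. when draining a folio_batch:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */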
1545 #ifdef CONFIG_CGROUP_WRITEBACK
1547 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1548 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1549 			 unsigned long *pheadroom, unsigned long *pdirty,
1550 			 unsigned long *pwriteback);
1552 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1553 					     struct bdi_writeback *wb);
1555 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1556 						   struct bdi_writeback *wb)
1558 	struct mem_cgroup *memcg;
1560 	if (mem_cgroup_disabled())
1563 	memcg = folio_memcg(folio);
1564 	if (unlikely(memcg && &memcg->css != wb->memcg_css))
1565 		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1568 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1570 #else /* CONFIG_CGROUP_WRITEBACK */
1572 static inline struct wb_domain
*mem_cgroup_wb_domain(struct bdi_writeback
*wb
)
1577 static inline void mem_cgroup_wb_stats(struct bdi_writeback
*wb
,
1578 unsigned long *pfilepages
,
1579 unsigned long *pheadroom
,
1580 unsigned long *pdirty
,
1581 unsigned long *pwriteback
)
1585 static inline void mem_cgroup_track_foreign_dirty(struct folio
*folio
,
1586 struct bdi_writeback
*wb
)
1590 static inline void mem_cgroup_flush_foreign(struct bdi_writeback
*wb
)
1594 #endif /* CONFIG_CGROUP_WRITEBACK */
1597 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1599 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1601 extern struct static_key_false memcg_sockets_enabled_key;
1602 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1603 void mem_cgroup_sk_alloc(struct sock *sk);
1604 void mem_cgroup_sk_free(struct sock *sk);
1605 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1607 #ifdef CONFIG_MEMCG_V1
1608 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1609 		return !!memcg->tcpmem_pressure;
1610 #endif /* CONFIG_MEMCG_V1 */
1612 		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1614 	} while ((memcg = parent_mem_cgroup(memcg)));
1618 int alloc_shrinker_info(struct mem_cgroup *memcg);
1619 void free_shrinker_info(struct mem_cgroup *memcg);
1620 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1621 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1623 #define mem_cgroup_sockets_enabled 0
1624 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1625 static inline void mem_cgroup_sk_free(struct sock *sk) { };
1626 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1631 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1632 				    int nid, int shrinker_id)
1638 bool mem_cgroup_kmem_disabled(void);
1639 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1640 void __memcg_kmem_uncharge_page(struct page *page, int order);
1643  * The returned objcg pointer is safe to use without additional
1644  * protection within a scope. The scope is defined either by
1645  * the current task (similar to the "current" global variable)
1646  * or by a set_active_memcg() pair.
1647  * Please use obj_cgroup_get() to get a reference if the pointer
1648  * needs to be used outside of the local scope.
1650 struct obj_cgroup *current_obj_cgroup(void);
1651 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1653 static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
1655 	struct obj_cgroup *objcg = current_obj_cgroup();
1657 	if (objcg)
1658 		obj_cgroup_get(objcg);
1663 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1664 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
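/*
 * Illustrative sketch (not part of this header): byte-sized charging of a
 * kernel allocation against the current task's objcg. The error handling
 * around it is an assumption for the example:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		return -ENOMEM;
 *	}
 *	... later, when the allocation is freed ...
 *	if (objcg) {
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 */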
1666 extern struct static_key_false memcg_bpf_enabled_key;
1667 static inline bool memcg_bpf_enabled(void)
1669 	return static_branch_likely(&memcg_bpf_enabled_key);
1672 extern struct static_key_false memcg_kmem_online_key;
1674 static inline bool memcg_kmem_online(void)
1676 	return static_branch_likely(&memcg_kmem_online_key);
1679 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1680 					 int order)
1682 	if (memcg_kmem_online())
1683 		return __memcg_kmem_charge_page(page, gfp, order);
1684 	return 0;
1687 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1689 	if (memcg_kmem_online())
1690 		__memcg_kmem_uncharge_page(page, order);
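/*
 * Illustrative sketch (not part of this header): most callers never invoke
 * memcg_kmem_charge_page() directly; requesting __GFP_ACCOUNT from the page
 * allocator charges (and later uncharges) the pages the same way:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *	...
 *	__free_pages(page, order);
 */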
1694  * A helper for accessing memcg's kmem_id, used for getting
1695  * corresponding LRU lists.
1697 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1699 	return memcg ? memcg->kmemcg_id : -1;
1702 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1704 static inline void count_objcg_events(struct obj_cgroup *objcg,
1705 				      enum vm_event_item idx,
1706 				      unsigned long count)
1708 	struct mem_cgroup *memcg;
1710 	if (!memcg_kmem_online())
1714 	memcg = obj_cgroup_memcg(objcg);
1715 	count_memcg_events(memcg, idx, count);
1719 bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
1722 static inline bool mem_cgroup_kmem_disabled(void)
1727 static inline int memcg_kmem_charge_page(struct page
*page
, gfp_t gfp
,
1733 static inline void memcg_kmem_uncharge_page(struct page
*page
, int order
)
1737 static inline int __memcg_kmem_charge_page(struct page
*page
, gfp_t gfp
,
1743 static inline void __memcg_kmem_uncharge_page(struct page
*page
, int order
)
1747 static inline struct obj_cgroup
*get_obj_cgroup_from_folio(struct folio
*folio
)
1752 static inline bool memcg_bpf_enabled(void)
1757 static inline bool memcg_kmem_online(void)
1762 static inline int memcg_kmem_id(struct mem_cgroup
*memcg
)
1767 static inline struct mem_cgroup
*mem_cgroup_from_slab_obj(void *p
)
1772 static inline void count_objcg_events(struct obj_cgroup
*objcg
,
1773 enum vm_event_item idx
,
1774 unsigned long count
)
1778 static inline ino_t
page_cgroup_ino(struct page
*page
)
1783 static inline bool mem_cgroup_node_allowed(struct mem_cgroup
*memcg
, int nid
)
1787 #endif /* CONFIG_MEMCG */
1789 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
1790 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1791 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1792 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1793 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1795 static inline bool obj_cgroup_may_zswap(struct obj_cgroup
*objcg
)
1799 static inline void obj_cgroup_charge_zswap(struct obj_cgroup
*objcg
,
1803 static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup
*objcg
,
1807 static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup
*memcg
)
1809 /* if zswap is disabled, do not block pages going to the swapping device */
1815 /* Cgroup v1-related declarations */
1817 #ifdef CONFIG_MEMCG_V1
1818 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1820 					unsigned long *total_scanned);
1822 bool mem_cgroup_oom_synchronize(bool wait);
1824 static inline bool task_in_memcg_oom(struct task_struct *p)
1826 	return p->memcg_in_oom;
1829 static inline void mem_cgroup_enter_user_fault(void)
1831 	WARN_ON(current->in_user_fault);
1832 	current->in_user_fault = 1;
1835 static inline void mem_cgroup_exit_user_fault(void)
1837 	WARN_ON(!current->in_user_fault);
1838 	current->in_user_fault = 0;
1841 void memcg1_swapout(struct folio *folio, swp_entry_t entry);
1842 void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
1844 #else /* CONFIG_MEMCG_V1 */
1846 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1848 					unsigned long *total_scanned)
1853 static inline bool task_in_memcg_oom(struct task_struct *p)
1858 static inline bool mem_cgroup_oom_synchronize(bool wait)
1863 static inline void mem_cgroup_enter_user_fault(void)
1867 static inline void mem_cgroup_exit_user_fault(void)
1871 static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
1875 static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
1879 #endif /* CONFIG_MEMCG_V1 */
1881 #endif /* _LINUX_MEMCONTROL_H */