/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};
/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};
struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};
/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	unsigned long map[0];
};
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu	*shrinker_map;
#endif
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			congested;	/* memcg has many dirty pages */
						/* backed by a congested BDI */

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};
struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)      struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	/* OOM-Killer disable */
	int		oom_kill_disable;

	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		stat[MEMCG_NR_STAT];
	atomic_long_t		events[NR_VM_EVENT_ITEMS];
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;

	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);
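
/*
 * Illustrative sketch (not part of this header): a reclaim loop can act
 * on the returned protection level roughly the way shrink_node() does,
 * skipping fully protected groups and only reclaiming partially
 * protected ones under real memory pressure.
 *
 *	switch (mem_cgroup_protected(target_memcg, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;		hard protection, skip entirely
 *	case MEMCG_PROT_LOW:
 *		...			reclaim only if nothing else is left
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;			no protection, reclaim normally
 *	}
 */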
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
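
/*
 * Illustrative sketch of the two-phase charge protocol above (loosely
 * based on how a fault path uses it; error handling trimmed and names
 * local to the example):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return -ENOMEM;
 *	...	map the page; this step may still fail
 *	if (error)
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *	else
 *		mem_cgroup_commit_charge(page, memcg, false, false);
 */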
static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}
/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
 * @node: node of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @node or a given
 * @memcg and @zone. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}
#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
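
/*
 * Illustrative sketch (not from this header): the usual way to walk a
 * memcg subtree pairs mem_cgroup_iter() with mem_cgroup_iter_break()
 * for early exits; visit_one() is a hypothetical callback.
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (visit_one(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */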
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}
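
/*
 * Illustrative sketch: a walk from a memcg up to the root follows the
 * page_counter parent chain until parent_mem_cgroup() returns NULL
 * (names below are local to the example).
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = memcg; iter; iter = parent_mem_cgroup(iter))
 *		nr_visited++;
 */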
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}
/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);
static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}
bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
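
/*
 * Illustrative sketch: callers that may run with interrupts enabled use
 * the irq-safe wrapper above, while code already running with
 * interrupts off calls the __-prefixed variant directly, e.g.:
 *
 *	mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
 *	...
 *	mod_memcg_state(memcg, MEMCG_CACHE, -nr_pages);
 */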
/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}
static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
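
/*
 * Illustrative sketch (simplified from callers such as the rmap code):
 * charging a node_stat_item against both the node and, when the page is
 * tracked, its memcg goes through the lruvec helpers, e.g.:
 *
 *	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr_pages);
 *	...
 *	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr_pages);
 */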
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}
static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
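
/*
 * Illustrative sketch: fault handlers account per-memcg VM events
 * against the mm's owning cgroup roughly like this:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 */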
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}
static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}
static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}
static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}
static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}
static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}
static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}
static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
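
/*
 * Illustrative sketch (simplified from the networking helpers): a
 * protocol checks the socket's memcg, if any, before treating memory
 * as plentiful; sk_memcg is the struct sock back-pointer set up by
 * mem_cgroup_sk_alloc().
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		under_pressure = true;
 */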
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
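
/*
 * Illustrative sketch (not part of this header): iterating the
 * per-memcg children of a root cache under slab_mutex; looking the
 * child cache up by index is done by mm-internal helpers and is only
 * hinted at here.
 *
 *	int idx;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(idx) {
 *		struct kmem_cache *c = ...;	child cache for idx, or NULL
 *
 *		if (!c)
 *			continue;
 *		...
 *	}
 *	mutex_unlock(&slab_mutex);
 */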
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(page, gfp, order);
	return 0;
}
static inline void memcg_kmem_uncharge(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(page, order);
}
static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
					  int order, struct mem_cgroup *memcg)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
	return 0;
}
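
/*
 * Illustrative sketch: pairing memcg_kmem_charge() with
 * memcg_kmem_uncharge() around a kernel page allocation (error handling
 * trimmed, not taken from this header):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);
 *	__free_pages(page, order);
 */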
/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}
extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge(struct page *page, int order)
{
}
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < 0; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */