/*
 * include/linux/memcontrol.h
 * Snapshot from people/arne_f/kernel.git, at commit
 * "memcg: mem+swap controller Kconfig".
 */
1 /* memcontrol.h - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

/*
 * Forward declarations only: every function below traffics in pointers,
 * so the full struct definitions are not needed in this header.
 */
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
27
#ifdef CONFIG_CGROUP_MEM_RES_CTLR

/*
 * Charging API (controller enabled).
 *
 * NOTE(review): gfp_t, enum lru_list, struct zone, struct list_head and
 * struct task_struct are used below but not declared in this header;
 * it relies on users including the relevant headers first — confirm
 * against callers.
 */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling: two-phase charge (try, then commit or cancel) */
extern int mem_cgroup_try_charge(struct mm_struct *mm,
		gfp_t gfp_mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
/* move @page onto the given per-cgroup LRU list */
extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);

/* isolate up to @nr_to_scan pages from @mem_cont's LRU onto @dst */
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

/* True if @mm's owner task belongs to @cgroup; each arg evaluated once. */
#define mm_match_cgroup(mm, cgroup)	\
	((cgroup) == mem_cgroup_from_task((mm)->owner))

extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);

/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);

extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* non-zero when mem+swap accounting is active */
extern int do_swap_account;
#endif

83
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

/*
 * Controller compiled out: all charge operations "succeed" without
 * doing anything, so callers need no #ifdefs of their own.
 */
static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

/* NOTE(review): *ptr is left untouched here; the commit/cancel stubs
 * below ignore it, so that is harmless in !CONFIG builds. */
static inline int mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
113
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

/* Nothing to shrink without the controller; report "no progress". */
static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
126
127 static inline void mem_cgroup_move_lists(struct page *page, bool active)
128 {
129 }
130
/* Without cgroup accounting every mm trivially "matches" any cgroup. */
static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
					const struct mem_cgroup *mem)
{
	return 1;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					struct page *oldpage,
					struct page *newpage)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}
158
/*
 * !CONFIG stub.  Returns long (always 0) to match the extern
 * declaration "extern long mem_cgroup_reclaim_imbalance(...)" in the
 * CONFIG_CGROUP_MEM_RES_CTLR branch above; the old stub returned int,
 * leaving the two halves of the interface inconsistent.
 */
static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}
163
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

/* No per-cgroup pages to reclaim without the controller. */
static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
					struct zone *zone, int priority,
					enum lru_list lru)
{
	return 0;
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */
188