/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

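/*
 * For example, MEMFILE_PRIVATE(1, RES_LIMIT) packs hstate index 1 into the
 * upper 16 bits of cftype->private and the RES_LIMIT attribute into the
 * lower 16; MEMFILE_IDX() and MEMFILE_ATTR() recover the two halves.
 */
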
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

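/*
 * Each hstate has two page_counter arrays per cgroup: hugepage[] tracks
 * huge pages actually faulted in, while rsvd_hugepage[] tracks huge page
 * reservations; the accessors above select between them via the rsvd flag.
 */
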
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}

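/*
 * For example, with 2MB huge pages on a 4KB base-page system,
 * pages_per_huge_page() is 512, so the default limit set above is
 * PAGE_COUNTER_MAX rounded down to a whole number of huge pages.
 */
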
static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste much memory for nodes which will
	 * never be onlined. It's better to use memory hotplug callback
	 * function.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to -1 for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : -1;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from
 * the active list or uncharged from the cgroup, so no need to get
 * page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e. a hugepage with less than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

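/*
 * Record a hugetlb memory event such as HUGETLB_MAX. The events_local
 * counter covers only this cgroup; the do/while loop below additionally
 * propagates the event to every ancestor short of the root, producing the
 * hierarchical events count.
 */
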
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

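/*
 * Charging is two-phase: the charge_cgroup functions above reserve room in
 * the page_counter up front, and the commit_charge functions below bind the
 * charged cgroup to a specific page once the allocation has succeeded.
 */
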
/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct page *page, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(page, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					   struct page *page, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(page, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
				       struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

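/*
 * The two helpers below uncharge reservation counters that live in a
 * resv_map / file_region rather than being attached to a page:
 * hugetlb_cgroup_uncharge_counter() releases a [start, end) range when a
 * reservation map is torn down, and hugetlb_cgroup_uncharge_file_region()
 * releases an individual region entry.
 */
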
void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}

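/*
 * Example cgroup v2 output on a two-node machine (values illustrative):
 *
 *	total=2097152 N0=2097152 N1=0
 *
 * On cgroup v1 (legacy) the non-hierarchical line is printed first and the
 * hierarchical line gains a "hierarchical_" prefix.
 */
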
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

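/*
 * A limit still at its default (PAGE_COUNTER_MAX rounded down to a whole
 * number of huge pages) reads back as the literal "max" on cgroup v2.
 */
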
static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

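/*
 * For example, a 2MB huge page size formats as "2MB", yielding control
 * file names such as "hugetlb.2MB.max" in the init functions below.
 */
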
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

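/*
 * For a 2MB hstate this publishes hugetlb.2MB.max, hugetlb.2MB.rsvd.max,
 * hugetlb.2MB.current, hugetlb.2MB.rsvd.current, hugetlb.2MB.events,
 * hugetlb.2MB.events.local and hugetlb.2MB.numa_stat. A limit of ten such
 * pages can be set with e.g. "echo 20M > hugetlb.2MB.max".
 */
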
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);
	set_hugetlb_cgroup_rsvd(oldhpage, NULL);

	/* move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

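/*
 * Note that migration only transfers the cgroup pointers from the old page
 * to the new one; no page_counter is touched, so the cgroup keeps paying
 * for exactly one huge page throughout.
 */
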
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};