/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

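/*
 * Worked example of the encoding above (illustrative values): the
 * hstate index lives in the upper 16 bits of cftype->private and the
 * RES_* attribute in the lower 16, so for hstate index 1 and RES_LIMIT
 * (which is 2 in the enum further down):
 *
 *	MEMFILE_PRIVATE(1, RES_LIMIT) == 0x10002
 *	MEMFILE_IDX(0x10002)  == 1	(hstate index)
 *	MEMFILE_ATTR(0x10002) == 2	(RES_LIMIT)
 */
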
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

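/*
 * Each hstate has two page counters per cgroup: ->hugepage[] accounts
 * huge pages faulted in, and ->rsvd_hugepage[] accounts huge page
 * reservations. The helpers below select between them via @rsvd.
 */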
static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}

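/*
 * Worked example for the limit rounding above (illustrative: x86-64
 * with 4KB base pages and 2MB huge pages): pages_per_huge_page() is
 * 512, so the default maximum of PAGE_COUNTER_MAX is rounded down to
 * the largest multiple of 512, keeping every limit a whole number of
 * huge pages.
 */
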
static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste much memory for nodes which will
	 * never be onlined. It's better to use a memory hotplug callback
	 * function.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

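/*
 * css lifecycle summary: css_alloc (above) allocates the per-node usage
 * arrays and initializes the counters via hugetlb_cgroup_init();
 * css_offline (below) reparents any remaining charges; css_free
 * releases the nodeinfo arrays and the cgroup itself.
 */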
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages in the active list without any cgroup, i.e.,
	 * hugepages with less than 3 pages. We can safely ignore those
	 * pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

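/*
 * Note on the do/while above: a single sweep of an hstate's active list
 * can race with pages still being committed to this cgroup, so the
 * sweep is repeated (with cond_resched() between passes) until
 * hugetlb_cgroup_have_usage() reads zero for every hstate.
 */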
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

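/*
 * As the loop above shows, events_local[] only counts events that
 * occurred in this cgroup, while events[] is also propagated to every
 * ancestor up to (but not including) the root, mirroring the
 * events/events.local split used by the memory controller on cgroup v2.
 */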
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

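/*
 * Sketch of the typical caller protocol (a sketch only, with error
 * handling abbreviated; the real callers live in mm/hugetlb.c around
 * huge page allocation):
 *
 *	struct hugetlb_cgroup *h_cg;
 *	int ret;
 *
 *	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 *	if (ret)
 *		return ERR_PTR(-ENOSPC);
 *	// ... allocate the huge page ...
 *	// on success, under hugetlb_lock:
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 *	// on allocation failure instead:
 *	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
 */
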
/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct page *page, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					   struct page *page, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;
	struct folio *folio = page_folio(page);

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
				       struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

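/*
 * Each RES_* value selects which field of the page counter a control
 * file exposes: these back the v1 usage_in_bytes/limit_in_bytes/
 * max_usage_in_bytes/failcnt files and the v2 .current/.max files,
 * with the RES_RSVD_* variants backing the matching .rsvd.* files
 * (see the __hugetlb_cgroup_file_*_init() functions below).
 */
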
static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}

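/*
 * Illustrative v2 output for a two-node machine with one 2MB page
 * charged on node 0:
 *
 *	$ cat hugetlb.2MB.numa_stat
 *	total=2097152 N0=2097152 N1=0
 *
 * The legacy (v1) file prints the non-hierarchical line first and
 * prefixes the hierarchical one with "hierarchical_".
 */
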
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

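/*
 * Illustrative usage on cgroup v2 (2MB hstate):
 *
 *	echo 1G  > hugetlb.2MB.max	# limit to 512 huge pages
 *	echo max > hugetlb.2MB.max	# remove the limit
 *
 * page_counter_memparse() accepts byte values with optional K/M/G
 * suffixes or the literal "max" ("-1" on v1), and the parsed value is
 * rounded down to a whole number of huge pages.
 */
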
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}

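/*
 * mem_fmt() examples: a 2MB hstate formats as "2MB" and a 1GB hstate
 * as "1GB", yielding file names such as "hugetlb.2MB.max" (v2) or
 * "hugetlb.1GB.limit_in_bytes" (v1).
 */
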
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

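/*
 * For a 2MB hstate, the function above registers: hugetlb.2MB.max,
 * hugetlb.2MB.rsvd.max, hugetlb.2MB.current, hugetlb.2MB.rsvd.current,
 * hugetlb.2MB.events, hugetlb.2MB.events.local and
 * hugetlb.2MB.numa_stat.
 */
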
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = page_hstate(oldhpage);
	struct folio *old_folio = page_folio(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(oldhpage, NULL);
	set_hugetlb_cgroup_rsvd(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};