// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	struct list_lru_memcg *mlrus;
	struct list_lru_node *nlru = &lru->node[nid];

	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru).
	 */
	mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock));
	if (mlrus && idx >= 0)
		return &mlrus->mlru[idx]->node[nid];
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!lru->mlrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
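/*
 * Illustrative sketch, not part of this file: a typical user of the
 * list_lru_add()/list_lru_del() primitives above embeds a list_head in its
 * object, adds the object when it becomes unused, and deletes it again when
 * the object is referenced. The names "my_object", "my_lru" and "obj" are
 * hypothetical; real users include the dentry and inode caches.
 *
 *	struct my_object {
 *		struct list_head lru;	// must be empty before list_lru_add()
 *		...
 *	};
 *
 *	// last reference dropped: park the object on the LRU
 *	list_lru_add(&my_lru, &obj->lru);
 *
 *	// object is in use again: take it back off the LRU
 *	list_lru_del(&my_lru, &obj->lru);
 */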

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
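/*
 * Illustrative sketch, not part of this file: a memcg-aware shrinker usually
 * reports its object count through the list_lru_shrink_count() helper from
 * <linux/list_lru.h>, which forwards sc->nid and sc->memcg to
 * list_lru_count_one() above. The names "my_lru" and "my_count_objects" are
 * hypothetical.
 *
 *	static unsigned long my_count_objects(struct shrinker *shrink,
 *					      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */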

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
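/*
 * Illustrative sketch, not part of this file: a minimal walk callback that
 * detaches every item it is shown onto a private dispose list passed via
 * cb_arg, to be freed outside the lru lock. All names are hypothetical;
 * dentry_lru_isolate() and inode_lru_isolate() are real examples.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		// the per-node lru lock is held by the walker
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 *
 *	LIST_HEAD(dispose);
 *	unsigned long nr = 128;
 *
 *	list_lru_walk_one(&my_lru, nid, memcg, my_isolate, &dispose, &nr);
 *	// free everything collected on &dispose here
 */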

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus,
					 int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(mlrus->mlru[i]);
}

static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_per_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static int memcg_init_list_lru_range(struct list_lru_memcg *mlrus,
				     int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		mlrus->mlru[i] = memcg_init_list_lru_one(GFP_KERNEL);
		if (!mlrus->mlru[i])
			goto fail;
	}
	return 0;
fail:
	memcg_destroy_list_lru_range(mlrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	struct list_lru_memcg *mlrus;
	int size = memcg_nr_cache_ids;

	lru->memcg_aware = memcg_aware;
	if (!memcg_aware)
		return 0;

	spin_lock_init(&lru->lock);

	mlrus = kvmalloc(struct_size(mlrus, mlru, size), GFP_KERNEL);
	if (!mlrus)
		return -ENOMEM;

	if (memcg_init_list_lru_range(mlrus, 0, size)) {
		kvfree(mlrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(lru->mlrus, mlrus);

	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	struct list_lru_memcg *mlrus;

	if (!list_lru_memcg_aware(lru))
		return;

	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	mlrus = rcu_dereference_protected(lru->mlrus, true);
	memcg_destroy_list_lru_range(mlrus, 0, memcg_nr_cache_ids);
	kvfree(mlrus);
}

static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(lru->mlrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(struct_size(new, mlru, new_size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (memcg_init_list_lru_range(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	spin_lock_irq(&lru->lock);
	memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size));
	rcu_assign_pointer(lru->mlrus, new);
	spin_unlock_irq(&lru->lock);

	kvfree_rcu(old, rcu);
	return 0;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	struct list_lru_memcg *mlrus;

	mlrus = rcu_dereference_protected(lru->mlrus,
					  lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	memcg_destroy_list_lru_range(mlrus, old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

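/*
 * The "drain" helpers below reparent the lists of a dying memcg: each
 * per-node list belonging to src_idx is spliced onto the corresponding list
 * of dst_memcg, the item count is transferred, and the destination's
 * shrinker bit is set so the moved objects stay visible to reclaim.
 */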
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}

static bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
				     struct list_lru *lru)
{
	bool allocated;
	int idx;

	idx = memcg->kmemcg_id;
	if (unlikely(idx < 0))
		return true;

	rcu_read_lock();
	allocated = !!rcu_dereference(lru->mlrus)->mlru[idx];
	rcu_read_unlock();

	return allocated;
}

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg *mlrus;
	struct list_lru_memcg_table {
		struct list_lru_per_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_per_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&lru->lock, flags);
	mlrus = rcu_dereference_protected(lru->mlrus, true);
	while (i--) {
		int index = table[i].memcg->kmemcg_id;

		if (mlrus->mlru[index])
			kfree(table[i].mlru);
		else
			mlrus->mlru[index] = table[i].mlru;
	}
	spin_unlock_irqrestore(&lru->lock, flags);

	kfree(table);

	return 0;
}
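/*
 * Illustrative sketch, not part of this file, and the call path is an
 * assumption based on the kmem_cache_alloc_lru() interface: callers normally
 * do not invoke memcg_list_lru_alloc() themselves. Passing the lru to the
 * allocator lets the slab pre-allocation hook populate the per-memcg list
 * for the current cgroup (and any missing ancestors) before the object can
 * be put on the lru. "my_cachep" and "my_lru" are hypothetical names.
 *
 *	obj = kmem_cache_alloc_lru(my_cachep, &my_lru, GFP_KERNEL);
 *	...
 *	list_lru_add(&my_lru, &obj->lru);
 */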
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);