#define MEM_CGROUP_ID_SHIFT 16
-struct mem_cgroup_id {
+struct mem_cgroup_private_id {
int id;
refcount_t ref;
};
struct cgroup_subsys_state css;
/* Private memcg ID. Used to ID objects that outlive the cgroup */
- struct mem_cgroup_id id;
+ struct mem_cgroup_private_id id;
/* Accounted resources */
struct page_counter memory; /* Both v1 & v2 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*)(struct task_struct *, void *), void *arg);
-static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
return 0;
return memcg->id.id;
}
+struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id);
+
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+ return mem_cgroup_private_id(memcg);
+}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
#ifdef CONFIG_SHRINKER_DEBUG
return NULL;
}
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
+{
+ WARN_ON_ONCE(id);
+ /* XXX: This should always return root_mem_cgroup */
+ return NULL;
+}
+
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
*/
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
-static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
+static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids);
-static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
- xa_erase(&mem_cgroup_ids, memcg->id.id);
+ xa_erase(&mem_cgroup_private_ids, memcg->id.id);
memcg->id.id = 0;
}
}
-void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
+void __maybe_unused mem_cgroup_private_id_get_many(struct mem_cgroup *memcg,
unsigned int n)
{
refcount_add(n, &memcg->id.ref);
}
-static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+static void mem_cgroup_private_id_put_many(struct mem_cgroup *memcg,
+					    unsigned int n)
{
if (refcount_sub_and_test(n, &memcg->id.ref)) {
- mem_cgroup_id_remove(memcg);
+ mem_cgroup_private_id_remove(memcg);
/* Memcg ID pins CSS */
css_put(&memcg->css);
}
}
-static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg)
{
- mem_cgroup_id_put_many(memcg, 1);
+ mem_cgroup_private_id_put_many(memcg, 1);
}
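/*
 * Illustrative pairing sketch, not part of this patch: a user that tags
 * N objects with the private ID takes one reference per object and drops
 * one as each object goes away. The ID holds a single css reference for
 * as long as its refcount is non-zero, so the memcg structure stays
 * reachable until the last put. The example_* helpers are hypothetical.
 */
static void example_tag_batch(struct mem_cgroup *memcg, unsigned int nr)
{
	/* One ID reference per tagged object. */
	mem_cgroup_private_id_get_many(memcg, nr);
}

static void example_untag_one(struct mem_cgroup *memcg)
{
	/* The final put erases the ID and releases the pinned css. */
	mem_cgroup_private_id_put(memcg);
}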
-struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg)
{
while (!refcount_inc_not_zero(&memcg->id.ref)) {
/*
}
/**
- * mem_cgroup_from_id - look up a memcg from a memcg id
+ * mem_cgroup_from_private_id - look up a memcg from a private memcg id
* @id: the memcg id to look up
*
* Caller must hold rcu_read_lock().
*/
-struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- return xa_load(&mem_cgroup_ids, id);
+ return xa_load(&mem_cgroup_private_ids, id);
+}
+
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ return mem_cgroup_from_private_id(id);
}
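/*
 * Illustrative lookup sketch, not part of this patch: resolving a stored
 * private ID back to a usable memcg. The ID may name a cgroup that went
 * offline after the ID was recorded, so callers that need an online
 * memcg pair the RCU lookup with css_tryget_online(), as the swap-in
 * path below does. example_resolve() is a hypothetical helper.
 */
static struct mem_cgroup *example_resolve(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_private_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;
	rcu_read_unlock();

	return memcg;
}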
#ifdef CONFIG_SHRINKER_DEBUG
if (!memcg)
return ERR_PTR(-ENOMEM);
- error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
+ error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
if (error)
goto fail;
lru_gen_init_memcg(memcg);
return memcg;
fail:
- mem_cgroup_id_remove(memcg);
+ mem_cgroup_private_id_remove(memcg);
__mem_cgroup_free(memcg);
return ERR_PTR(error);
}
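/*
 * Note, not part of this patch: xa_alloc() above only reserves the ID
 * slot with a NULL entry, so mem_cgroup_from_private_id() keeps
 * returning NULL for this ID until css_online() publishes the memcg
 * pointer with xa_store() below.
 */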
css_get(css);
/*
- * Ensure mem_cgroup_from_id() works once we're fully online.
+ * Ensure mem_cgroup_from_private_id() works once we're fully online.
*
* We could do this earlier and require callers to filter with
* css_tryget_online(). But right now there are no users that
* need earlier access, so publish it here at the end of onlining.
* This matches the regular ID destruction during offlining.
*/
- xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
+ xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
return 0;
offline_kmem:
memcg_offline_kmem(memcg);
remove_id:
- mem_cgroup_id_remove(memcg);
+ mem_cgroup_private_id_remove(memcg);
return -ENOMEM;
}
drain_all_stock(memcg);
- mem_cgroup_id_put(memcg);
+ mem_cgroup_private_id_put(memcg);
}
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
id = lookup_swap_cgroup_id(entry);
rcu_read_lock();
- memcg = mem_cgroup_from_id(id);
+ memcg = mem_cgroup_from_private_id(id);
if (!memcg || !css_tryget_online(&memcg->css))
memcg = get_mem_cgroup_from_mm(mm);
rcu_read_unlock();
return 0;
}
- memcg = mem_cgroup_id_get_online(memcg);
+ memcg = mem_cgroup_private_id_get_online(memcg);
if (!mem_cgroup_is_root(memcg) &&
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
memcg_memory_event(memcg, MEMCG_SWAP_MAX);
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
- mem_cgroup_id_put(memcg);
+ mem_cgroup_private_id_put(memcg);
return -ENOMEM;
}
/* Get references for the tail pages, too */
if (nr_pages > 1)
- mem_cgroup_id_get_many(memcg, nr_pages - 1);
+ mem_cgroup_private_id_get_many(memcg, nr_pages - 1);
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
- swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
+ swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
return 0;
}
id = swap_cgroup_clear(entry, nr_pages);
rcu_read_lock();
- memcg = mem_cgroup_from_id(id);
+ memcg = mem_cgroup_from_private_id(id);
if (memcg) {
if (!mem_cgroup_is_root(memcg)) {
if (do_memsw_account())
page_counter_uncharge(&memcg->swap, nr_pages);
}
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
- mem_cgroup_id_put_many(memcg, nr_pages);
+ mem_cgroup_private_id_put_many(memcg, nr_pages);
}
rcu_read_unlock();
}
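/*
 * Reference accounting sketch, not part of this patch: for an N-page
 * folio the charge path above ends up holding exactly N private-ID
 * references, one from mem_cgroup_private_id_get_online() and N - 1
 * from mem_cgroup_private_id_get_many(). The uncharge path drops one
 * reference per freed swap slot via mem_cgroup_private_id_put_many(),
 * so the puts match the N gets once every slot of the folio is freed.
 */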
hist = lru_hist_from_seq(min_seq);
atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);
- return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
+ return pack_shadow(mem_cgroup_private_id(memcg), pgdat, token, workingset);
}
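/*
 * Note, not part of this patch: the shadow entry packed here carries the
 * private ID alongside the node and eviction token. On refault,
 * unpack_shadow() recovers the ID and mem_cgroup_from_private_id() maps
 * it back to the memcg, which may have been offlined in the meantime;
 * this is exactly the "objects that outlive the cgroup" case the private
 * ID exists for.
 */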
/*
unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
- memcg = mem_cgroup_from_id(memcg_id);
+ memcg = mem_cgroup_from_private_id(memcg_id);
*lruvec = mem_cgroup_lruvec(memcg, pgdat);
max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);
lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
/* XXX: target_memcg can be NULL, go through lruvec */
- memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
+ memcgid = mem_cgroup_private_id(lruvec_memcg(lruvec));
eviction = atomic_long_read(&lruvec->nonresident_age);
eviction >>= bucket_order;
workingset_age_nonresident(lruvec, folio_nr_pages(folio));
* would be better if the root_mem_cgroup existed in all
* configurations instead.
*/
- eviction_memcg = mem_cgroup_from_id(memcgid);
+ eviction_memcg = mem_cgroup_from_private_id(memcgid);
if (!mem_cgroup_tryget(eviction_memcg))
eviction_memcg = NULL;
rcu_read_unlock();