git.ipfire.org Git - thirdparty/linux.git/commitdiff
bpf: syscall: Introduce memcg enter/exit helpers
author: Puranjay Mohan <puranjay@kernel.org>
Fri, 2 Jan 2026 20:02:27 +0000 (12:02 -0800)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 2 Jan 2026 22:31:59 +0000 (14:31 -0800)
Introduce bpf_map_memcg_enter() and bpf_map_memcg_exit() helpers to
reduce code duplication in memcg context management.

bpf_map_memcg_enter() gets the memcg from the map, sets it as active,
and returns both the previous and the now active memcg.

bpf_map_memcg_exit() restores the previous active memcg and releases the
reference obtained during enter.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20260102200230.25168-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/syscall.c

index 9efb2ddf331ccd68735d883c48f796198ea9e265..4e9667ed66306336b590b303b3e76d306a82e82b 100644 (file)
@@ -2608,6 +2608,10 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
                        unsigned long nr_pages, struct page **page_array);
 #ifdef CONFIG_MEMCG
+void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+                        struct mem_cgroup **new_memcg);
+void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+                       struct mem_cgroup *memcg);
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node);
 void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
@@ -2632,6 +2636,17 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                kvcalloc(_n, _size, _flags)
 #define bpf_map_alloc_percpu(_map, _size, _align, _flags)      \
                __alloc_percpu_gfp(_size, _align, _flags)
+static inline void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+                                      struct mem_cgroup **new_memcg)
+{
+       *new_memcg = NULL;
+       *old_memcg = NULL;
+}
+
+static inline void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+                                     struct mem_cgroup *memcg)
+{
+}
 #endif
 
 static inline int
index a4d38272d8bcc03ec3e16b5ad5e16f38f97e9053..c77ab2e3265975e1f6e87f266267fc8e450a5bab 100644 (file)
@@ -505,17 +505,29 @@ static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
        return root_mem_cgroup;
 }
 
+void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+                        struct mem_cgroup **new_memcg)
+{
+       *new_memcg = bpf_map_get_memcg(map);
+       *old_memcg = set_active_memcg(*new_memcg);
+}
+
+void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+                       struct mem_cgroup *new_memcg)
+{
+       set_active_memcg(old_memcg);
+       mem_cgroup_put(new_memcg);
+}
+
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node)
 {
        struct mem_cgroup *memcg, *old_memcg;
        void *ptr;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
+       bpf_map_memcg_exit(old_memcg, memcg);
 
        return ptr;
 }
@@ -526,11 +538,9 @@ void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags
        struct mem_cgroup *memcg, *old_memcg;
        void *ptr;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        ptr = kmalloc_nolock(size, flags | __GFP_ACCOUNT, node);
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
+       bpf_map_memcg_exit(old_memcg, memcg);
 
        return ptr;
 }
@@ -540,11 +550,9 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
        struct mem_cgroup *memcg, *old_memcg;
        void *ptr;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        ptr = kzalloc(size, flags | __GFP_ACCOUNT);
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
+       bpf_map_memcg_exit(old_memcg, memcg);
 
        return ptr;
 }
@@ -555,11 +563,9 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
        struct mem_cgroup *memcg, *old_memcg;
        void *ptr;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
+       bpf_map_memcg_exit(old_memcg, memcg);
 
        return ptr;
 }
@@ -570,11 +576,9 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
        struct mem_cgroup *memcg, *old_memcg;
        void __percpu *ptr;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
+       bpf_map_memcg_exit(old_memcg, memcg);
 
        return ptr;
 }
@@ -612,12 +616,9 @@ int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
        unsigned long i, j;
        struct page *pg;
        int ret = 0;
-#ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg, *old_memcg;
 
-       memcg = bpf_map_get_memcg(map);
-       old_memcg = set_active_memcg(memcg);
-#endif
+       bpf_map_memcg_enter(map, &old_memcg, &memcg);
        for (i = 0; i < nr_pages; i++) {
                pg = __bpf_alloc_page(nid);
 
@@ -631,10 +632,7 @@ int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
                break;
        }
 
-#ifdef CONFIG_MEMCG
-       set_active_memcg(old_memcg);
-       mem_cgroup_put(memcg);
-#endif
+       bpf_map_memcg_exit(old_memcg, memcg);
        return ret;
 }