mm: memcg/slab: uncharge during kmem_cache_free_bulk()
author    Bharata B Rao <bharata@linux.ibm.com>
          Tue, 13 Oct 2020 23:53:09 +0000 (16:53 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 5 Nov 2020 10:51:31 +0000 (11:51 +0100)
commit d1b2cf6cb84a9bd0de6f151512648dd1af82f80f upstream.

Object cgroup charging is done for all the objects during allocation, but
in the bulk allocation/freeing case, only one object ends up being
uncharged during freeing.

Fix this by adding a separate call from kmem_cache_free_bulk() that
uncharges all the objects, and by modifying memcg_slab_free_hook() to
handle bulk uncharging.

Fixes: 964d4bd370d5 ("mm: memcg/slab: save obj_cgroup for non-root slab objects")
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/20201009060423.390479-1-bharata@linux.ibm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/slab.c
mm/slab.h
mm/slub.c

index f658e86ec8ceeee6567829d48345c7fa9260a237..5c70600d8b1cc930e63ca7fa1ff4d94f3db1a09f 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3440,7 +3440,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
                memset(objp, 0, cachep->object_size);
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
-       memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
+       memcg_slab_free_hook(cachep, &objp, 1);
 
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
index 6cc323f1313afc37ec999f12ffedbcd05f2a86a7..6dd4b702888a75aa02f93bc8e41938fa5a2d083e 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -345,30 +345,42 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
        obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-                                       void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+                                       void **p, int objects)
 {
+       struct kmem_cache *s;
        struct obj_cgroup *objcg;
+       struct page *page;
        unsigned int off;
+       int i;
 
        if (!memcg_kmem_enabled())
                return;
 
-       if (!page_has_obj_cgroups(page))
-               return;
+       for (i = 0; i < objects; i++) {
+               if (unlikely(!p[i]))
+                       continue;
 
-       off = obj_to_index(s, page, p);
-       objcg = page_obj_cgroups(page)[off];
-       page_obj_cgroups(page)[off] = NULL;
+               page = virt_to_head_page(p[i]);
+               if (!page_has_obj_cgroups(page))
+                       continue;
 
-       if (!objcg)
-               return;
+               if (!s_orig)
+                       s = page->slab_cache;
+               else
+                       s = s_orig;
 
-       obj_cgroup_uncharge(objcg, obj_full_size(s));
-       mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
-                       -obj_full_size(s));
+               off = obj_to_index(s, page, p[i]);
+               objcg = page_obj_cgroups(page)[off];
+               if (!objcg)
+                       continue;
 
-       obj_cgroup_put(objcg);
+               page_obj_cgroups(page)[off] = NULL;
+               obj_cgroup_uncharge(objcg, obj_full_size(s));
+               mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+                               -obj_full_size(s));
+               obj_cgroup_put(objcg);
+       }
 }
 
 #else /* CONFIG_MEMCG_KMEM */
@@ -406,8 +418,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-                                       void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s,
+                                       void **p, int objects)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
index 6d3574013b2f8cf745e9f2ffeab9b819e8b27546..0cbe67f13946ee6f9a90f9a09637e2019812e24f 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3091,7 +3091,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       memcg_slab_free_hook(s, page, head);
+       memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the currently cpus per cpu slab.
@@ -3253,6 +3253,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
        if (WARN_ON(!size))
                return;
 
+       memcg_slab_free_hook(s, p, size);
        do {
                struct detached_freelist df;