git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fix backport of mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch to 4.19
author: Sasha Levin <sashal@kernel.org>
        Tue, 28 Jul 2020 15:10:57 +0000 (11:10 -0400)
committer: Sasha Levin <sashal@kernel.org>
        Tue, 28 Jul 2020 15:12:32 +0000 (11:12 -0400)

Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.19/mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch [new file with mode: 0644]
queue-4.19/series

index 9e3c35aaae6b08c90033eb38238de26bf8a72fae..2f5f75d0fd85bd9002ee280339346433a306b54f 100644 (file)
@@ -41,14 +41,16 @@ Cc: <stable@vger.kernel.org>
 Link: http://lkml.kernel.org/r/20200716165103.83462-1-songmuchun@bytedance.com
 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
+Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- mm/slab_common.c |   35 ++++++++++++++++++++++++++++-------
+ mm/slab_common.c | 35 ++++++++++++++++++++++++++++-------
  1 file changed, 28 insertions(+), 7 deletions(-)
 
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 3dbf693527ddc..a94b9981eb172 100644
 --- a/mm/slab_common.c
 +++ b/mm/slab_common.c
-@@ -310,6 +310,14 @@ int slab_unmergeable(struct kmem_cache *
+@@ -311,6 +311,14 @@ int slab_unmergeable(struct kmem_cache *s)
        if (s->refcount < 0)
                return 1;
  
@@ -63,16 +65,16 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        return 0;
  }
  
-@@ -832,12 +840,15 @@ static int shutdown_memcg_caches(struct
+@@ -841,12 +849,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
        return 0;
  }
  
 -static void flush_memcg_workqueue(struct kmem_cache *s)
 +static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
  {
-       mutex_lock(&slab_mutex);
+       spin_lock_irq(&memcg_kmem_wq_lock);
        s->memcg_params.dying = true;
-       mutex_unlock(&slab_mutex);
+       spin_unlock_irq(&memcg_kmem_wq_lock);
 +}
  
 +static void flush_memcg_workqueue(struct kmem_cache *s)
@@ -80,7 +82,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        /*
         * SLUB deactivates the kmem_caches through call_rcu_sched. Make
         * sure all registered rcu callbacks have been invoked.
-@@ -858,10 +869,6 @@ static inline int shutdown_memcg_caches(
+@@ -867,10 +878,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
  {
        return 0;
  }
@@ -91,7 +93,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  #endif /* CONFIG_MEMCG_KMEM */
  
  void slab_kmem_cache_release(struct kmem_cache *s)
-@@ -879,8 +886,6 @@ void kmem_cache_destroy(struct kmem_cach
+@@ -888,8 +895,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
        if (unlikely(!s))
                return;
  
@@ -100,7 +102,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        get_online_cpus();
        get_online_mems();
  
-@@ -890,6 +895,22 @@ void kmem_cache_destroy(struct kmem_cach
+@@ -899,6 +904,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
        if (s->refcount)
                goto out_unlock;
  
@@ -123,3 +125,6 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
        err = shutdown_memcg_caches(s);
        if (!err)
                err = shutdown_cache(s);
+-- 
+2.25.1
+
diff --git a/queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch b/queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch
new file mode 100644 (file)
index 0000000..a7a6cb9
--- /dev/null
@@ -0,0 +1,91 @@
+From 4589b7df7fa4ae321c8c38ff28888d543317e7e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jul 2019 20:56:24 -0700
+Subject: mm: memcg/slab: synchronize access to kmem_cache dying flag using a
+ spinlock
+
+From: Roman Gushchin <guro@fb.com>
+
+[ Upstream commit 63b02ef7dc4ec239df45c018ac0adbd02ba30a0c ]
+
+Currently the memcg_params.dying flag and the corresponding workqueue used
+for the asynchronous deactivation of kmem_caches is synchronized using the
+slab_mutex.
+
+It makes impossible to check this flag from the irq context, which will be
+required in order to implement asynchronous release of kmem_caches.
+
+So let's switch over to the irq-save flavor of the spinlock-based
+synchronization.
+
+Link: http://lkml.kernel.org/r/20190611231813.3148843-8-guro@fb.com
+Signed-off-by: Roman Gushchin <guro@fb.com>
+Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Andrei Vagin <avagin@gmail.com>
+Cc: Qian Cai <cai@lca.pw>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/slab_common.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index b5776b1301f0c..3dbf693527ddc 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+ #ifdef CONFIG_MEMCG_KMEM
+ LIST_HEAD(slab_root_caches);
++static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
+ void slab_init_memcg_params(struct kmem_cache *s)
+ {
+@@ -717,14 +718,22 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
+           WARN_ON_ONCE(s->memcg_params.deact_fn))
+               return;
++      /*
++       * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
++       * flag and make sure that no new kmem_cache deactivation tasks
++       * are queued (see flush_memcg_workqueue() ).
++       */
++      spin_lock_irq(&memcg_kmem_wq_lock);
+       if (s->memcg_params.root_cache->memcg_params.dying)
+-              return;
++              goto unlock;
+       /* pin memcg so that @s doesn't get destroyed in the middle */
+       css_get(&s->memcg_params.memcg->css);
+       s->memcg_params.deact_fn = deact_fn;
+       call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
++unlock:
++      spin_unlock_irq(&memcg_kmem_wq_lock);
+ }
+ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+@@ -834,9 +843,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
+ static void flush_memcg_workqueue(struct kmem_cache *s)
+ {
+-      mutex_lock(&slab_mutex);
++      spin_lock_irq(&memcg_kmem_wq_lock);
+       s->memcg_params.dying = true;
+-      mutex_unlock(&slab_mutex);
++      spin_unlock_irq(&memcg_kmem_wq_lock);
+       /*
+        * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+-- 
+2.25.1
+
index b7d6895d67c54ff066dbda2d7a52d13fb5bef07b..13ed3a285ce0c808846b96bd2e32ab6f0105145a 100644 (file)
@@ -72,6 +72,7 @@ fbdev-detect-integer-underflow-at-struct-fbcon_ops-clear_margins.patch
 vt-reject-zero-sized-screen-buffer-size.patch
 makefile-fix-gcc_toolchain_dir-prefix-for-clang-cross-compilation.patch
 mm-memcg-fix-refcount-error-while-moving-and-swapping.patch
+mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch
 mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
 io-mapping-indicate-mapping-failure.patch
 drm-amdgpu-fix-null-dereference-in-dpm-sysfs-handlers.patch