From f342d646726845789e7a7c458eec1bf7d4eca67d Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Tue, 28 Jul 2020 11:10:57 -0400
Subject: [PATCH] Fix backport of
 mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch to 4.19

Signed-off-by: Sasha Levin
---
 ...-leak-at-non-root-kmem_cache-destroy.patch | 23 +++--
 ...nchronize-access-to-kmem_cache-dying.patch | 91 +++++++++++++++++++
 queue-4.19/series                             |  1 +
 3 files changed, 106 insertions(+), 9 deletions(-)
 create mode 100644 queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch

diff --git a/queue-4.19/mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch b/queue-4.19/mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
index 9e3c35aaae6..2f5f75d0fd8 100644
--- a/queue-4.19/mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
+++ b/queue-4.19/mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
@@ -41,14 +41,16 @@ Cc:
 Link: http://lkml.kernel.org/r/20200716165103.83462-1-songmuchun@bytedance.com
 Signed-off-by: Linus Torvalds
 Signed-off-by: Greg Kroah-Hartman
-
+Signed-off-by: Sasha Levin
 ---
- mm/slab_common.c |   35 ++++++++++++++++++++++++++++-------
+ mm/slab_common.c | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)
 
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 3dbf693527ddc..a94b9981eb172 100644
 --- a/mm/slab_common.c
 +++ b/mm/slab_common.c
-@@ -310,6 +310,14 @@ int slab_unmergeable(struct kmem_cache *
+@@ -311,6 +311,14 @@ int slab_unmergeable(struct kmem_cache *s)
  	if (s->refcount < 0)
  		return 1;
  
@@ -63,16 +65,16 @@ Signed-off-by: Greg Kroah-Hartman
  	return 0;
  }
  
-@@ -832,12 +840,15 @@ static int shutdown_memcg_caches(struct
+@@ -841,12 +849,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
  	return 0;
  }
  
 -static void flush_memcg_workqueue(struct kmem_cache *s)
 +static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
  {
- 	mutex_lock(&slab_mutex);
+ 	spin_lock_irq(&memcg_kmem_wq_lock);
  	s->memcg_params.dying = true;
- 	mutex_unlock(&slab_mutex);
+ 	spin_unlock_irq(&memcg_kmem_wq_lock);
 +}
  
 +static void flush_memcg_workqueue(struct kmem_cache *s)
 +{
  	/*
  	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
  	 * sure all registered rcu callbacks have been invoked.
-@@ -858,10 +869,6 @@ static inline int shutdown_memcg_caches(
+@@ -867,10 +878,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
  {
  	return 0;
  }
@@ -91,7 +93,7 @@ Signed-off-by: Greg Kroah-Hartman
  #endif /* CONFIG_MEMCG_KMEM */
  
  void slab_kmem_cache_release(struct kmem_cache *s)
-@@ -879,8 +886,6 @@ void kmem_cache_destroy(struct kmem_cach
+@@ -888,8 +895,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
  	if (unlikely(!s))
  		return;
  
@@ -100,7 +102,7 @@
  	get_online_cpus();
  	get_online_mems();
  
-@@ -890,6 +895,22 @@ void kmem_cache_destroy(struct kmem_cach
+@@ -899,6 +904,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
  	if (s->refcount)
  		goto out_unlock;
  
@@ -123,3 +125,6 @@
  	err = shutdown_memcg_caches(s);
  	if (!err)
  		err = shutdown_cache(s);
+-- 
+2.25.1
+
diff --git a/queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch b/queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch
new file mode 100644
index 00000000000..a7a6cb980d5
--- /dev/null
+++ b/queue-4.19/mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch
@@ -0,0 +1,91 @@
+From 4589b7df7fa4ae321c8c38ff28888d543317e7e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 11 Jul 2019 20:56:24 -0700
+Subject: mm: memcg/slab: synchronize access to kmem_cache dying flag using a
+ spinlock
+
+From: Roman Gushchin
+
+[ Upstream commit 63b02ef7dc4ec239df45c018ac0adbd02ba30a0c ]
+
+Currently the memcg_params.dying flag and the corresponding workqueue used
+for the asynchronous deactivation of kmem_caches are synchronized using the
+slab_mutex.
+
+This makes it impossible to check the flag from irq context, which will be
+required in order to implement asynchronous release of kmem_caches.
+
+So let's switch over to the irq-save flavor of the spinlock-based
+synchronization.
+
+Link: http://lkml.kernel.org/r/20190611231813.3148843-8-guro@fb.com
+Signed-off-by: Roman Gushchin
+Acked-by: Vladimir Davydov
+Reviewed-by: Shakeel Butt
+Cc: Christoph Lameter
+Cc: Johannes Weiner
+Cc: Michal Hocko
+Cc: Waiman Long
+Cc: David Rientjes
+Cc: Joonsoo Kim
+Cc: Pekka Enberg
+Cc: Andrei Vagin
+Cc: Qian Cai
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ mm/slab_common.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index b5776b1301f0c..3dbf693527ddc 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+ #ifdef CONFIG_MEMCG_KMEM
+ 
+ LIST_HEAD(slab_root_caches);
++static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
+ 
+ void slab_init_memcg_params(struct kmem_cache *s)
+ {
+@@ -717,14 +718,22 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
+ 	    WARN_ON_ONCE(s->memcg_params.deact_fn))
+ 		return;
+ 
++	/*
++	 * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
++	 * flag and make sure that no new kmem_cache deactivation tasks
++	 * are queued (see flush_memcg_workqueue() ).
++	 */
++	spin_lock_irq(&memcg_kmem_wq_lock);
+ 	if (s->memcg_params.root_cache->memcg_params.dying)
+-		return;
++		goto unlock;
+ 
+ 	/* pin memcg so that @s doesn't get destroyed in the middle */
+ 	css_get(&s->memcg_params.memcg->css);
+ 
+ 	s->memcg_params.deact_fn = deact_fn;
+ 	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
++unlock:
++	spin_unlock_irq(&memcg_kmem_wq_lock);
+ }
+ 
+ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+@@ -834,9 +843,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
+ 
+ static void flush_memcg_workqueue(struct kmem_cache *s)
+ {
+-	mutex_lock(&slab_mutex);
++	spin_lock_irq(&memcg_kmem_wq_lock);
+ 	s->memcg_params.dying = true;
+-	mutex_unlock(&slab_mutex);
++	spin_unlock_irq(&memcg_kmem_wq_lock);
+ 
+ 	/*
+ 	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+-- 
+2.25.1
+
diff --git a/queue-4.19/series b/queue-4.19/series
index b7d6895d67c..13ed3a285ce 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -72,6 +72,7 @@ fbdev-detect-integer-underflow-at-struct-fbcon_ops-clear_margins.patch
 vt-reject-zero-sized-screen-buffer-size.patch
 makefile-fix-gcc_toolchain_dir-prefix-for-clang-cross-compilation.patch
 mm-memcg-fix-refcount-error-while-moving-and-swapping.patch
+mm-memcg-slab-synchronize-access-to-kmem_cache-dying.patch
 mm-memcg-slab-fix-memory-leak-at-non-root-kmem_cache-destroy.patch
 io-mapping-indicate-mapping-failure.patch
 drm-amdgpu-fix-null-dereference-in-dpm-sysfs-handlers.patch
-- 
2.47.3
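
A note for readers following the locking change above: taken together, the two
patches establish one invariant -- the memcg_params.dying flag is only read or
written under memcg_kmem_wq_lock, and the destroy path sets the flag before
flushing the deactivation workqueue, so no new deactivation work can be queued
once teardown has begun. The sketch below is a minimal userspace analogue of
that ordering, not kernel code: a pthread mutex stands in for the spinlock,
and fake_cache, try_queue_deactivation() and set_dying() are hypothetical
names, not kernel symbols.

/* Minimal userspace analogue of the dying-flag pattern above.
 * Build with: cc -o sketch sketch.c -pthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cache {
	pthread_mutex_t wq_lock;	/* stands in for memcg_kmem_wq_lock */
	bool dying;			/* stands in for memcg_params.dying */
	int queued;			/* deferred deactivation items */
};

/* Analogue of slab_deactivate_memcg_cache_rcu_sched(): queue deferred
 * work only while the cache is not dying, under the same lock that
 * guards the flag. */
static bool try_queue_deactivation(struct fake_cache *c)
{
	bool ok = false;

	pthread_mutex_lock(&c->wq_lock);
	if (!c->dying) {
		c->queued++;	/* the kernel queues an rcu callback here */
		ok = true;
	}
	pthread_mutex_unlock(&c->wq_lock);
	return ok;
}

/* Analogue of memcg_set_kmem_cache_dying(): once this returns, any racing
 * try_queue_deactivation() observes dying == true, so a flush that follows
 * sees every item that will ever be queued. */
static void set_dying(struct fake_cache *c)
{
	pthread_mutex_lock(&c->wq_lock);
	c->dying = true;
	pthread_mutex_unlock(&c->wq_lock);
}

int main(void)
{
	struct fake_cache c = { .wq_lock = PTHREAD_MUTEX_INITIALIZER };

	if (try_queue_deactivation(&c))
		printf("queued while live: %d item(s)\n", c.queued);

	set_dying(&c);		/* destroy path: mark dying first... */

	if (!try_queue_deactivation(&c))
		printf("refused after dying was set\n");
	/* ...then flush the already-queued items and free the cache. */
	return 0;
}

The kernel uses a spinlock with interrupts disabled rather than a mutex
because the flag must eventually be checked from contexts that cannot sleep
(the point of the prerequisite patch); the mutex above mirrors only the
mutual exclusion and the set-before-flush ordering, not that constraint.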