slub: Keep track of whether slub is on the per-node partial list
author     Chengming Zhou <zhouchengming@bytedance.com>
           Thu, 2 Nov 2023 03:23:24 +0000 (03:23 +0000)
committer  Vlastimil Babka <vbabka@suse.cz>
           Wed, 22 Nov 2023 14:36:25 +0000 (15:36 +0100)
Now we rely on the "frozen" bit to decide whether we should manipulate
slab->slab_list; this will be changed in the following patch.

Instead, we introduce another way to keep track of whether a slab is on
the per-node partial list: we reuse the PG_workingset bit.

We have to use the atomic set_bit() and clear_bit() variants and change
slab_unlock() to bit_spin_unlock(), because when cmpxchg is not available
and PG_locked is used for the slab lock, there may be concurrent
operations on the two bits. Thanks to Mark Brown for reporting a hang
and for testing a previous version that used the non-atomic operations.
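
The race is easiest to see with both bits collapsed into one word. The
following is a minimal userspace sketch, not kernel code: the bit
numbers, variable names and the sequential interleaving are made up for
illustration, standing in for PG_locked and PG_workingset sharing the
same page->flags word, and the plain store stands in for what a
non-atomic unlock amounts to.

  /* Userspace illustration only -- bit numbers are made up. */
  #include <stdio.h>

  #define PG_LOCKED      0
  #define PG_WORKINGSET  1

  int main(void)
  {
          unsigned long flags = 1UL << PG_LOCKED;  /* slab "locked" */

          /* CPU 0: a non-atomic unlock reads the whole flags word ... */
          unsigned long tmp = flags & ~(1UL << PG_LOCKED);

          /* ... CPU 1 meanwhile sets the partial-list bit in the
           * same word (what slab_set_node_partial() would do) ... */
          flags |= 1UL << PG_WORKINGSET;

          /* ... and CPU 0's plain store wipes out CPU 1's update. */
          flags = tmp;

          printf("PG_workingset after unlock: %lu (bit lost)\n",
                 (flags >> PG_WORKINGSET) & 1);
          return 0;
  }

With bit_spin_unlock() the unlock is a single atomic clear of PG_locked,
so a concurrent atomic set_bit()/clear_bit() on PG_workingset in the
same word cannot be lost.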

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index 03384cd965c513469b21d2649f4e3e3b87e9b535..6efcbf79fd2dc95541da6fd5b15f080f0b0bc553 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -522,7 +522,7 @@ static __always_inline void slab_unlock(struct slab *slab)
        struct page *page = slab_page(slab);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
-       __bit_spin_unlock(PG_locked, &page->flags);
+       bit_spin_unlock(PG_locked, &page->flags);
 }
 
 static inline bool
@@ -2116,6 +2116,25 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
        free_slab(s, slab);
 }
 
+/*
+ * SLUB reuses PG_workingset bit to keep track of whether it's on
+ * the per-node partial list.
+ */
+static inline bool slab_test_node_partial(const struct slab *slab)
+{
+       return folio_test_workingset((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_node_partial(struct slab *slab)
+{
+       set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
+static inline void slab_clear_node_partial(struct slab *slab)
+{
+       clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
 /*
  * Management of partially allocated slabs.
  */
@@ -2127,6 +2146,7 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
                list_add_tail(&slab->slab_list, &n->partial);
        else
                list_add(&slab->slab_list, &n->partial);
+       slab_set_node_partial(slab);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -2141,6 +2161,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 {
        lockdep_assert_held(&n->list_lock);
        list_del(&slab->slab_list);
+       slab_clear_node_partial(slab);
        n->nr_partial--;
 }
 
@@ -4833,6 +4854,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
 
                        if (free == slab->objects) {
                                list_move(&slab->slab_list, &discard);
+                               slab_clear_node_partial(slab);
                                n->nr_partial--;
                                dec_slabs_node(s, node, slab->objects);
                        } else if (free <= SHRINK_PROMOTE_MAX)