slab: Rename slab->__page_flags to slab->flags
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 11 Jun 2025 15:59:05 +0000 (16:59 +0100)
committer Vlastimil Babka <vbabka@suse.cz>
          Wed, 18 Jun 2025 11:06:26 +0000 (13:06 +0200)
Slab has its own reasons for using flag bits; they aren't just
the page bits.  Maybe this won't be the ultimate solution, but
we should be clear that these bits are in use.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20250611155916.2579160-3-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slab.h
mm/slub.c

index 05a21dc796e095e8db934564d559494cd81746ec..32785ff3470a89e693a013bcfba44ab531dbff1d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -50,7 +50,7 @@ typedef union {
 
 /* Reuses the bits in struct page */
 struct slab {
-       unsigned long __page_flags;
+       unsigned long flags;
 
        struct kmem_cache *slab_cache;
        union {
@@ -99,7 +99,7 @@ struct slab {
 
 #define SLAB_MATCH(pg, sl)                                             \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
-SLAB_MATCH(flags, __page_flags);
+SLAB_MATCH(flags, flags);
 SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
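
The SLAB_MATCH() assertion above is what makes a rename like this safe: struct slab deliberately overlays struct page, so the renamed flags member must sit at exactly the same offset as page->flags. A rough, self-contained illustration of the technique (standalone C11, not kernel code; the two structs are simplified stand-ins and main() is only there so the sketch builds and runs):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures: struct slab reuses the
 * memory of struct page, so corresponding members must line up exactly. */
struct page {
	unsigned long flags;
	unsigned long compound_head;
};

struct slab {
	unsigned long flags;		/* was __page_flags before the rename */
	void *slab_cache;
};

/* Same idea as the kernel's SLAB_MATCH(): a compile-time check that the
 * named members of the two structs live at the same offset. */
#define SLAB_MATCH(pg, sl) \
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl), \
		      "struct slab member must alias struct page member")

SLAB_MATCH(flags, flags);

int main(void)
{
	printf("flags offset in both structs: %zu\n",
	       offsetof(struct slab, flags));
	return 0;
}

If the member were moved or renamed on only one side, the static_assert would fail the build rather than silently corrupting page state at runtime.
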
index 823042efbfc987601c3384c201d3bada441b7683..9353da50b573af94c30c434664edac1f9d18f70d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
  *                     the fast path and disables lockless freelists.
  */
 
+/**
+ * enum slab_flags - How the slab flags bits are used.
+ * @SL_locked: Is locked with slab_lock()
+ *
+ * The slab flags share space with the page flags but some bits have
+ * different interpretations.  The high bits are used for information
+ * like zone/node/section.
+ */
+enum slab_flags {
+       SL_locked = PG_locked,
+};
+
 /*
  * We could simply use migrate_disable()/enable() but as long as it's a
  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
@@ -639,12 +651,12 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
  */
 static __always_inline void slab_lock(struct slab *slab)
 {
-       bit_spin_lock(PG_locked, &slab->__page_flags);
+       bit_spin_lock(SL_locked, &slab->flags);
 }
 
 static __always_inline void slab_unlock(struct slab *slab)
 {
-       bit_spin_unlock(PG_locked, &slab->__page_flags);
+       bit_spin_unlock(SL_locked, &slab->flags);
 }
 
 static inline bool
@@ -1010,7 +1022,7 @@ static void print_slab_info(const struct slab *slab)
 {
        pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
               slab, slab->objects, slab->inuse, slab->freelist,
-              &slab->__page_flags);
+              &slab->flags);
 }
 
 void skip_orig_size_check(struct kmem_cache *s, const void *object)
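
For context on the mm/slub.c side: SL_locked is just a bit index into the new flags word, and slab_lock()/slab_unlock() take that bit with the kernel's bit_spin_lock()/bit_spin_unlock(). A self-contained userspace approximation of that bit-spinlock pattern is sketched below; my_bit_spin_lock() and my_bit_spin_unlock() are hypothetical stand-ins built on GCC/Clang atomic builtins, and the real primitives additionally handle preemption and architecture-specific details omitted here.

#include <stdio.h>

/* Bit index into the flags word; in the kernel SL_locked aliases PG_locked. */
enum slab_flags {
	SL_locked = 0,
};

/* Stand-in for struct slab: in the kernel this word shares space with
 * page->flags, which is why only designated bits may be used as locks. */
struct slab {
	unsigned long flags;
};

/* Spin until this thread is the one that set the bit (acquire ordering). */
static void my_bit_spin_lock(int bit, unsigned long *word)
{
	while (__atomic_fetch_or(word, 1UL << bit, __ATOMIC_ACQUIRE) &
	       (1UL << bit))
		;	/* busy-wait; the kernel also disables preemption */
}

/* Clear the bit again, releasing the lock (release ordering). */
static void my_bit_spin_unlock(int bit, unsigned long *word)
{
	__atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_RELEASE);
}

int main(void)
{
	struct slab slab = { .flags = 0 };

	my_bit_spin_lock(SL_locked, &slab.flags);
	printf("locked:   flags=%#lx\n", slab.flags);
	my_bit_spin_unlock(SL_locked, &slab.flags);
	printf("unlocked: flags=%#lx\n", slab.flags);
	return 0;
}

The point of the rename plus the new enum is exactly this kind of use: bits of slab->flags are consumed by slab itself (here the lock bit), not merely inherited page flags, so naming the field flags and documenting the bits in enum slab_flags makes that ownership explicit.
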