From 3df29914d9fd1a28ff0630ad5aa8a92abb97543d Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 11 Jun 2025 16:59:07 +0100
Subject: [PATCH] slab: Add SL_pfmemalloc flag

Give slab its own name for this flag. Move the implementation from
slab.h to slub.c since it's only used inside slub.c.

Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Harry Yoo
Link: https://patch.msgid.link/20250611155916.2579160-5-willy@infradead.org
Signed-off-by: Vlastimil Babka
---
 mm/slab.h | 24 ------------------------
 mm/slub.c | 21 +++++++++++++++++++++
 2 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 32785ff3470a8..248b34c839b7c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 #define slab_page(s) folio_page(slab_folio(s), 0)
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline bool slab_test_pfmemalloc(const struct slab *slab)
-{
-	return folio_test_active(slab_folio(slab));
-}
-
-static inline void slab_set_pfmemalloc(struct slab *slab)
-{
-	folio_set_active(slab_folio(slab));
-}
-
-static inline void slab_clear_pfmemalloc(struct slab *slab)
-{
-	folio_clear_active(slab_folio(slab));
-}
-
-static inline void __slab_clear_pfmemalloc(struct slab *slab)
-{
-	__folio_clear_active(slab_folio(slab));
-}
-
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
diff --git a/mm/slub.c b/mm/slub.c
index 15d92c736af57..d44be423dd507 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,6 +187,7 @@
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
  * @SL_partial: On the per-node partial list
+ * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
@@ -195,6 +196,7 @@
 enum slab_flags {
 	SL_locked = PG_locked,
 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
+	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
 };
 
 /*
@@ -648,6 +650,25 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 {
 }
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
+/*
+ * If network-based swap is enabled, slub must keep track of whether memory
+ * was allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(SL_pfmemalloc, &slab->flags);
+}
+
 /*
  * Per slab locking using the pagelock
  */
-- 
2.47.2
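
For readers less familiar with the kernel's bitops, the behavioural change is small: the three helpers now operate on a dedicated SL_pfmemalloc bit in slab->flags (aliased to PG_active for historical reasons) instead of going through the folio "active" flag accessors. The standalone C sketch below mirrors those semantics outside the kernel; the struct name, the bit index, and the main() driver are invented for illustration and stand in for the kernel's test_bit()/set_bit()/__clear_bit() primitives, so treat it as a sketch of the idea rather than the mm/slub.c code itself.

/*
 * Illustrative, self-contained sketch of the SL_pfmemalloc accessors.
 * The bit index and struct below are placeholders; the kernel stores the
 * flag in slab->flags at the PG_active bit position and uses the real
 * test_bit()/set_bit()/__clear_bit() bitops.
 */
#include <stdbool.h>
#include <stdio.h>

#define SL_PFMEMALLOC_BIT 6UL	/* placeholder bit position, not PG_active */

struct fake_slab {
	unsigned long flags;
};

static bool slab_test_pfmemalloc(const struct fake_slab *slab)
{
	return slab->flags & (1UL << SL_PFMEMALLOC_BIT);
}

static void slab_set_pfmemalloc(struct fake_slab *slab)
{
	slab->flags |= 1UL << SL_PFMEMALLOC_BIT;
}

/*
 * Mirrors the non-atomic __clear_bit() used by the patch: in the kernel
 * this is only safe because the clear happens when no one else can see
 * the slab (e.g. on the slab-freeing path).
 */
static void __slab_clear_pfmemalloc(struct fake_slab *slab)
{
	slab->flags &= ~(1UL << SL_PFMEMALLOC_BIT);
}

int main(void)
{
	struct fake_slab slab = { .flags = 0 };

	slab_set_pfmemalloc(&slab);
	printf("pfmemalloc after set:   %d\n", slab_test_pfmemalloc(&slab));
	__slab_clear_pfmemalloc(&slab);
	printf("pfmemalloc after clear: %d\n", slab_test_pfmemalloc(&slab));
	return 0;
}

One design point visible in the diff itself: slab.h removed four helpers, but only the double-underscore (non-atomic) clear variant is carried over to slub.c, while set and test keep the atomic-style set_bit()/test_bit() calls; the sketch can only hint at that distinction in comments, since plain C has no direct equivalent of the kernel's atomic bitops.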