#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
-static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif
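The assert matters because the freelist/counters pair in struct slab is updated with a double-width cmpxchg (cmpxchg128 on 64-bit), and such an operation can only act on a naturally aligned quantity. Below is a minimal user-space sketch of the invariant, assuming GCC/Clang on a 64-bit target; the names pair_t and demo_slab are hypothetical stand-ins, with pair_t playing the role of freelist_full_t:

/*
 * Sketch only, not kernel code: the pair must start on a sizeof(pair_t)
 * boundary or a double-width cmpxchg on it would fault.
 */
#include <assert.h>
#include <stddef.h>

typedef unsigned __int128 pair_t;	/* two machine words; GCC/Clang only */

struct demo_slab {
	unsigned long flags;			/* leading field, as in struct page */
	union {
		struct {
			void *freelist;		/* first word of the pair */
			unsigned long counters;	/* second word of the pair */
		};
		pair_t full;			/* both words as one 128-bit value */
	} pair __attribute__((aligned(sizeof(pair_t))));
};

/* User-space analogue of the IS_ALIGNED() assert in the hunk above. */
static_assert(offsetof(struct demo_slab, pair) % sizeof(pair_t) == 0,
	      "freelist/counters pair must be naturally aligned for cmpxchg128");

Misalign the pair by one word and the static_assert fires, just as the build-time check above would.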
-/**
- * folio_slab - Converts from folio to slab.
- * @folio: The folio.
- *
- * Currently struct slab is a different representation of a folio where
- * folio_test_slab() is true.
- *
- * Return: The slab which contains this folio.
- */
-#define folio_slab(folio)	(_Generic((folio),			\
-	const struct folio *:	(const struct slab *)(folio),		\
-	struct folio *:		(struct slab *)(folio)))
-
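For reference, the trick the removed macro relied on is easy to show in isolation: _Generic selects a different cast for const and non-const arguments, so the folio-to-slab conversion cannot silently strip a const qualifier. A standalone sketch with dummy struct bodies (the demo() caller is hypothetical):

/* Sketch only: dummy bodies stand in for the real definitions. */
struct folio { unsigned long flags; };
struct slab { unsigned long flags; };	/* alternate view of the same memory */

#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),	\
	struct folio *:		(struct slab *)(folio)))

static inline void demo(struct folio *f, const struct folio *cf)
{
	struct slab *s = folio_slab(f);		/* non-const in, non-const out */
	const struct slab *cs = folio_slab(cf);	/* const in, const out */
	(void)s;
	(void)cs;
}

The surviving slab_folio() below uses the same pattern in the other direction.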
/**
* slab_folio - The folio allocated for a slab
* @s: The slab.
NR_SLUB_STAT_ITEMS
};
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
+struct freelist_tid {
 	union {
 		struct {
-			void **freelist;	/* Pointer to next available object */
+			void *freelist;		/* Pointer to next available object */
 			unsigned long tid;	/* Globally unique transaction id */
 		};
-		freelist_aba_t freelist_tid;
+		freelist_full_t freelist_tid;
 	};
+};
+
+#ifndef CONFIG_SLUB_TINY
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+	struct freelist_tid;
 	struct slab *slab;	/* The slab from which we are allocating */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct slab *partial;	/* Partially allocated slabs */
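The reason tid travels next to freelist is easiest to see in a standalone sketch of the fastpath pattern the pairing enables: both words are read and replaced as one double-width value, so a single compare-exchange pops an object and simultaneously proves no other transaction ran in between. All names below are hypothetical; the kernel does this with this_cpu_try_cmpxchg128/64 on the per-cpu structure rather than C11 atomics (with GCC/Clang this sketch needs -latomic):

/* Sketch only, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_pair {
	union {
		struct {
			void *freelist;		/* next free object */
			unsigned long tid;	/* transaction id */
		};
		unsigned __int128 full;		/* stand-in for freelist_full_t */
	};
};

static bool try_pop(_Atomic unsigned __int128 *slot, void **objp)
{
	struct demo_pair old, new;

	old.full = atomic_load(slot);
	if (!old.freelist)
		return false;	/* empty: caller takes the slowpath */

	/* In this sketch, a free object's first word links to the next one. */
	new.freelist = *(void **)old.freelist;
	new.tid = old.tid + 1;

	/* Succeeds only if freelist AND tid are both unchanged. */
	if (!atomic_compare_exchange_strong(slot, &old.full, new.full))
		return false;

	*objp = old.freelist;
	return true;
}

Because every slowpath operation also bumps tid, a stale freelist value can never be re-installed by accident, which is the ABA protection the union exists to provide.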