 }
 #endif

-static inline struct kmem_cache *virt_to_cache(const void *obj)
+static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
 {
+	struct kmem_cache *cachep;
 	struct slab *slab;

 	slab = virt_to_slab(obj);
-	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
-		return NULL;
-	return slab->slab_cache;
-}
-
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
-	struct kmem_cache *cachep;
+	if (WARN_ONCE(!slab,
+		      "kmem_cache_free(%s, %p): object is not in a slab page\n",
+		      s->name, obj))
+		return;
-	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
-	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
-		return s;
+	cachep = slab->slab_cache;
-	cachep = virt_to_cache(x);
-	if (WARN(cachep && cachep != s,
-		 "%s: Wrong slab cache. %s but object is from %s\n",
-		 __func__, s->name, cachep->name))
-		print_tracking(cachep, x);
-	return cachep;
+	if (WARN_ONCE(cachep != s,
+		      "kmem_cache_free(%s, %p): object belongs to different cache %s\n",
+		      s->name, obj, cachep ? cachep->name : "(NULL)")) {
+		if (cachep)
+			print_tracking(cachep, obj);
+		return;
+	}
 }
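For illustration only (not part of the patch): a minimal sketch of the misuse that warn_free_bad_obj() now reports. The caches demo_a and demo_b are hypothetical; with CONFIG_SLAB_FREELIST_HARDENED or SLAB_CONSISTENCY_CHECKS active, the wrong-cache free below warns once and leaks the object instead of corrupting the other cache's freelist.

	struct kmem_cache *demo_a, *demo_b;	/* hypothetical caches */
	void *obj;

	demo_a = kmem_cache_create("demo_a", 64, 0, 0, NULL);
	demo_b = kmem_cache_create("demo_b", 64, 0, 0, NULL);
	obj = kmem_cache_alloc(demo_a, GFP_KERNEL);	/* error handling omitted */

	kmem_cache_free(demo_b, obj);	/* WARNs: "... object belongs to different cache demo_a" */
	kmem_cache_free(demo_a, obj);	/* still valid: the bad free leaked obj rather than freeing it */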
 /**
  * kmem_cache_free - Deallocate an object
  * @s: The cache the allocation was from.
  * @x: The previously allocated object.
  *
  * Free an object which was previously allocated from this
  * cache.
  */
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	s = cache_from_obj(s, x);
-	if (!s)
-		return;
+	struct slab *slab;
+
+	slab = virt_to_slab(x);
+
+	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
+	    kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
+		/*
+		 * Intentionally leak the object in these cases, because it
+		 * would be too dangerous to continue.
+		 */
+		if (unlikely(!slab || (slab->slab_cache != s))) {
+			warn_free_bad_obj(s, x);
+			return;
+		}
+	}
+
 	trace_kmem_cache_free(_RET_IP_, x, s);
-	slab_free(s, virt_to_slab(x), x, _RET_IP_);
+	slab_free(s, slab, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
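A note on the fast path (sketch, assuming both CONFIG_SLAB_FREELIST_HARDENED and CONFIG_SLUB_DEBUG are disabled): IS_ENABLED() expands to a compile-time constant, and kmem_cache_debug_flags() is constant false without SLUB_DEBUG, so on such builds the new guard folds away entirely and kmem_cache_free() reduces to roughly:

	void kmem_cache_free(struct kmem_cache *s, void *x)
	{
		struct slab *slab = virt_to_slab(x);

		trace_kmem_cache_free(_RET_IP_, x, s);
		slab_free(s, slab, x, _RET_IP_);
	}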
 		df->s = slab->slab_cache;
 	} else {
 		df->slab = slab;
-		df->s = cache_from_obj(s, object); /* Support for memcg */
+		df->s = s;
 	}

 	/* Start new detached freelist */
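For context (illustration only, not part of the patch): the hunk above is in build_detached_freelist(), which backs kmem_cache_free_bulk(). With cache_from_obj() gone, df->s is taken directly from the caller-supplied cache instead of being re-derived per object, so bulk callers must pass the cache the objects actually came from, as in this sketch:

	void *objs[8];
	size_t i;

	for (i = 0; i < 8; i++)
		objs[i] = kmem_cache_alloc(s, GFP_KERNEL);	/* error handling omitted */

	/* ... use the objects ... */

	kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);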