* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
- * The object to be freed can be allocated either by kmalloc() or
- * kmem_cache_alloc().
+ * The object to be freed can be allocated by kmalloc(),
+ * kmalloc_nolock(), or kmem_cache_alloc().
*
* Note that the allowable offset might decrease in the future.
*
struct kmemleak_object *object;
object = find_and_remove_object(ptr, 0, objflags);
- if (!object) {
-#ifdef DEBUG
- kmemleak_warn("Freeing unknown object at 0x%08lx\n",
- ptr);
-#endif
+ if (!object)
+ /*
+ * kmalloc_nolock() -> kfree() calls kmemleak_free()
+ * without kmemleak_alloc().
+ */
return;
- }
__delete_object(object);
}
struct kmemleak_object *object;
object = __find_and_get_object(ptr, 0, objflags);
- if (!object) {
- kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
- ptr,
- (color == KMEMLEAK_GREY) ? "Grey" :
- (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
+ if (!object)
+ /*
+ * kmalloc_nolock() -> kfree_rcu() calls kmemleak_ignore()
+ * without kmemleak_alloc().
+ */
return;
- }
paint_it(object, color);
put_object(object);
}
* Returns true if freeing of the object can proceed, false if its reuse
* was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
* to KFENCE.
+ *
+ * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
+ * are invoked, so some free hooks must handle asymmetric hook calls.
+ *
+ * Alloc hooks called for kmalloc_nolock():
+ * - kmsan_slab_alloc()
+ * - kasan_slab_alloc()
+ * - memcg_slab_post_alloc_hook()
+ * - alloc_tagging_slab_alloc_hook()
+ *
+ * Free hooks that must handle missing corresponding alloc hooks:
+ * - kmemleak_free_recursive()
+ * - kfence_free()
+ *
+ * Free hooks that have no alloc hook counterpart, and are thus safe to call:
+ * - debug_check_no_locks_freed()
+ * - debug_check_no_obj_freed()
+ * - __kcsan_check_access()
*/
static __always_inline
bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
/**
* kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
*
* If @object is NULL, no operation is performed.
*/
page = virt_to_page(object);
slab = page_slab(page);
if (!slab) {
+ /* kmalloc_nolock() doesn't support large kmalloc */
free_large_kmalloc(page, (void *)object);
return;
}