static struct kfence_metadata *kfence_metadata_init __read_mostly;
-/* Freelist with available objects. */
-static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
-static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+
+/* Freelist with available objects. */
+static struct list_head kfence_freelist __guarded_by(&kfence_freelist_lock) = LIST_HEAD_INIT(kfence_freelist);
/*
* The static key to set up a KFENCE allocation; or if static keys are not used
}
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+ __must_hold(&meta->lock)
{
unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
unsigned long *stack_entries, size_t num_stack_entries)
+ __must_hold(&meta->lock)
{
struct kfence_track *track =
next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
alloc_covered_add(alloc_stack_hash, 1);
/* Set required slab fields. */
- slab = virt_to_slab((void *)meta->addr);
+ slab = virt_to_slab(addr);
slab->slab_cache = cache;
slab->objects = 1;
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
struct kcsan_scoped_access assert_page_exclusive;
+ u32 alloc_stack_hash;
unsigned long flags;
bool init;
/* Mark the object as freed. */
metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
init = slab_want_init_on_free(meta->cache);
+ alloc_stack_hash = meta->alloc_stack_hash;
raw_spin_unlock_irqrestore(&meta->lock, flags);
- alloc_covered_add(meta->alloc_stack_hash, -1);
+ alloc_covered_add(alloc_stack_hash, -1);
/* Check canary bytes for memory corruption. */
check_canary(meta);
* which partial initialization succeeded.
*/
static unsigned long kfence_init_pool(void)
+ __context_unsafe(/* constructor */)
{
unsigned long addr, start_pfn;
int i;
{
const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
struct kfence_metadata *to_report = NULL;
+ unsigned long unprotected_page = 0;
enum kfence_error_type error_type;
unsigned long flags;
if (!to_report)
goto out;
- raw_spin_lock_irqsave(&to_report->lock, flags);
- to_report->unprotected_page = addr;
error_type = KFENCE_ERROR_OOB;
+ unprotected_page = addr;
/*
* If the object was freed before we took the lock we can still
if (!to_report)
goto out;
- raw_spin_lock_irqsave(&to_report->lock, flags);
error_type = KFENCE_ERROR_UAF;
/*
* We may race with __kfence_alloc(), and it is possible that a
out:
if (to_report) {
+ raw_spin_lock_irqsave(&to_report->lock, flags);
+ to_report->unprotected_page = unprotected_page;
kfence_report_error(addr, is_write, regs, to_report, error_type);
raw_spin_unlock_irqrestore(&to_report->lock, flags);
} else {
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64
+extern raw_spinlock_t kfence_freelist_lock;
+
/* KFENCE object states. */
enum kfence_object_state {
KFENCE_OBJECT_UNUSED, /* Object is unused. */
/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
- struct list_head list; /* Freelist node; access under kfence_freelist_lock. */
+ struct list_head list __guarded_by(&kfence_freelist_lock); /* Freelist node. */
struct rcu_head rcu_head; /* For delayed freeing. */
/*
* In case of an invalid access, the page that was unprotected; we
* optimistically only store one address.
*/
- unsigned long unprotected_page;
+ unsigned long unprotected_page __guarded_by(&lock);
/* Allocation and free stack information. */
- struct kfence_track alloc_track;
- struct kfence_track free_track;
+ struct kfence_track alloc_track __guarded_by(&lock);
+ struct kfence_track free_track __guarded_by(&lock);
/* For updating alloc_covered on frees. */
- u32 alloc_stack_hash;
+ u32 alloc_stack_hash __guarded_by(&lock);
#ifdef CONFIG_MEMCG
struct slabobj_ext obj_exts;
#endif
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
const struct kfence_metadata *meta, enum kfence_error_type type);
-void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
+void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) __must_hold(&meta->lock);
#endif /* MM_KFENCE_KFENCE_H */
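
For orientation, a minimal self-contained sketch of the __guarded_by()/__must_hold() pattern used above (not taken from the patch; the example_* names are hypothetical, and the annotation macros are assumed to be available as in the files above):

#include <linux/spinlock.h>

/* Hypothetical example: a lock and the data it guards. */
static DEFINE_RAW_SPINLOCK(example_lock);

struct example_obj {
	int refs __guarded_by(&example_lock);	/* Only accessed under example_lock. */
};

/* Callers must hold example_lock; the analysis checks every call site. */
static void example_get(struct example_obj *obj)
	__must_hold(&example_lock)
{
	obj->refs++;
}

static void example_user(struct example_obj *obj)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_get(obj);	/* OK: example_lock is held here. */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

struct kfence_metadata applies the same idea with a per-object lock: __guarded_by(&lock) refers to the object's own lock member, which the helpers take as &meta->lock.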
static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
bool show_alloc)
+ __must_hold(&meta->lock)
{
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
return;
- if (meta)
- lockdep_assert_held(&meta->lock);
/*
* Because we may generate reports in printk-unfriendly parts of the
* kernel, such as scheduler code, the use of printk() could deadlock.
stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);
if (meta) {
+ lockdep_assert_held(&meta->lock);
pr_err("\n");
kfence_print_object(NULL, meta);
}
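
The __context_unsafe(/* constructor */) marking on kfence_init_pool() above opts an entire function out of the checking, which suits code that runs before any concurrency exists. A hypothetical sketch of the same idea (illustrative names only, assuming the same annotation macros):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int example_table[8] __guarded_by(&example_lock);

/* Early init runs single-threaded, so guarded data may be written lock-free. */
static void __init example_init(void)
	__context_unsafe(/* single-threaded constructor, no concurrent access yet */)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_table); i++)
		example_table[i] = 0;	/* No lock taken; checking is suppressed in this function. */
}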