// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"
/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond) \
	({ \
		const bool __cond = WARN_ON(cond); \
		if (unlikely(__cond)) { \
			WRITE_ONCE(kfence_enabled, false); \
			disabled_by_warn = true; \
		} \
		__cond; \
	})
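/*
 * The ({ ... }) statement expression evaluates to __cond, so callers can use
 * !KFENCE_WARN_ON(...) to both disable KFENCE and propagate the failure (see
 * kfence_protect() and kfence_unprotect() below).
 */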
/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."
static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	/* Using 0 to indicate KFENCE is disabled. */
	if (!num && READ_ONCE(kfence_enabled)) {
		pr_info("disabled\n");
		WRITE_ONCE(kfence_enabled, false);
	}

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();

	return 0;
}
static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}
static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
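/*
 * Example usage (assuming a typical configuration): the sample interval can be
 * set at boot via "kfence.sample_interval=100" on the kernel command line, or
 * at runtime through /sys/module/kfence/parameters/sample_interval; writing 0
 * disables KFENCE, and writing a non-zero value after boot re-enables it via
 * kfence_enable_late().
 */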
/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
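/*
 * The timer worker resets the gate to 0 once per sample interval; the first
 * __kfence_alloc() to increment it back to 1 may attempt a guarded allocation,
 * while all later callers observe a value greater than 1 and bail out (see
 * toggle_allocation_gate() and __kfence_alloc() below).
 */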
/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
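/*
 * Example sizing, assuming the default CONFIG_KFENCE_NUM_OBJECTS=255:
 * const_ilog2(255) == 7, so ALLOC_COVERED_ORDER == 9 and the filter holds
 * ALLOC_COVERED_SIZE == 512 counters, i.e. roughly two counters per object.
 */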
/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;
/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}
static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}
/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}
/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}
static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
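/*
 * Pool layout implied by the calculation above: pages 0 and 1 are guard pages,
 * and object i's data page sits at __kfence_pool + (i + 1) * 2 * PAGE_SIZE,
 * followed by its right guard page (see kfence_init_pool()).
 */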
/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}
/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}
static inline void set_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * The canary may overlap part of the object memory; this is harmless,
	 * since the user is expected to initialize the object before using it.
	 */
	for (; addr < meta->addr; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;

	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
}
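/*
 * Canary coverage: every byte from the start of the page up to meta->addr, and
 * from the object's end (rounded down to a u64 boundary) up to the end of the
 * page, holds the canary pattern; check_canary() below verifies both ranges.
 */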
static inline void check_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * We'll iterate over each canary byte per-side until a corrupted byte
	 * is found. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
			break;
	}

	/*
	 * If a corruption was found within one of the u64 words, or the
	 * remaining canary bytes do not fill a whole u64, fall back to
	 * checking byte by byte to pinpoint the corruption.
	 */
	for (; addr < meta->addr; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			return;
	}
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {

			for (; addr - pageaddr < PAGE_SIZE; addr++) {
				if (!check_canary_byte((u8 *)addr))
					return;
			}
		}
	}
}
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;
	const bool random_right_allocate = get_random_u32_below(2);
	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, the random
	 * values will always be zero. We still benefit from enabling KFENCE as
	 * early as possible, even when the RNG is not yet available, as this
	 * will allow KFENCE to detect bugs due to earlier allocations. The only
	 * downside is that the out-of-bounds accesses detected are
	 * deterministic for such allocations.
	 */
	if (random_right_allocate) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	set_canary(meta);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * canary bytes.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (random_fault)
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	check_canary(meta);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}
static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}
/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return addr;

	pages = virt_to_page(__kfence_pool);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto reset_slab;

		addr += 2 * PAGE_SIZE;
	}

	return 0;

reset_slab:
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;
#ifdef CONFIG_MEMCG
		slab->memcg_data = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}

	return addr;
}
static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr) {
		/*
		 * The pool is live and will never be deallocated from this point on.
		 * Ignore the pool object from the kmemleak phys object tree, as it would
		 * otherwise overlap with allocations returned by kfence_alloc(), which
		 * are registered with kmemleak through the slab post-alloc hook.
		 */
		kmemleak_ignore_phys(__pa(__kfence_pool));
		return true;
	}

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}
static bool kfence_init_pool_late(void)
{
	unsigned long addr, free_size;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/* Same as above. */
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)addr, free_size);
#endif
	__kfence_pool = NULL;
	return false;
}
/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(stats);
/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations objects_sops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);
static int kfence_debugfs_init(void)
{
	struct dentry *kfence_dir;

	if (!READ_ONCE(kfence_enabled))
		return 0;

	kfence_dir = debugfs_create_dir("kfence", NULL);
	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
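/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug): the counters
 * above can be read with "cat /sys/kernel/debug/kfence/stats", and per-object
 * state with "cat /sys/kernel/debug/kfence/objects".
 */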
/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		if (meta->state == KFENCE_OBJECT_ALLOCATED)
			check_canary(meta);
	}
}

static int kfence_check_canary_callback(struct notifier_block *nb,
					unsigned long reason, void *arg)
{
	kfence_check_all_canary();
	return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
	.notifier_call = kfence_check_canary_callback,
};
/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif
/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
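/*
 * Without CONFIG_KFENCE_STATIC_KEYS the static branch stays enabled (see
 * kfence_init_enable()) and the atomic kfence_allocation_gate alone throttles
 * allocations; this worker then only re-opens the gate and re-arms itself
 * every kfence_sample_interval milliseconds.
 */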
/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	/* If the pool has already been initialized by arch, skip the below. */
	if (__kfence_pool)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}
static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}
void __init kfence_init(void)
{
	stack_hash_seed = get_random_u32();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}
static int kfence_init_late(void)
{
	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
	if (!pages)
		return -ENOMEM;
	__kfence_pool = page_to_virt(pages);
#else
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}
	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;
#endif

	if (!kfence_init_pool_late()) {
		pr_err("%s failed\n", __func__);
		return -EBUSY;
	}

	kfence_init_enable();
	kfence_debugfs_init();

	return 0;
}
static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("re-enabled\n");
	return 0;
}
void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations for this slab, if KFENCE has been disabled for
	 * this slab.
	 */
	if (s->flags & SLAB_SKIP_KFENCE)
		return NULL;

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}
size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}
void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}