// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object_tree_root (or
 *   object_phys_tree_root). The object_list is the main list holding the
 *   metadata (struct kmemleak_object) for the allocated memory blocks.
 *   The object_tree_root and object_phys_tree_root are red-black trees used
 *   to look up metadata based on a pointer to the corresponding memory
 *   block. The object_phys_tree_root is for objects allocated with a
 *   physical address. The kmemleak_object structures are added to the
 *   object_list and object_tree_root (or object_phys_tree_root) in the
 *   create_object() function called from the kmemleak_alloc() (or
 *   kmemleak_alloc_phys()) callback and removed in delete_object() called
 *   from the kmemleak_free() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
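
/*
 * An illustrative sketch of the nesting order above, as it occurs on the
 * scanning path (see scan_object() and scan_block() below; this pseudo-code
 * and its flags variables are for illustration only, not part of this file):
 *
 *	mutex_lock(&scan_mutex);				(kmemleak_scan)
 *	raw_spin_lock_irqsave(&object->lock, flags);		(scan_object)
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags2);		(scan_block)
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	... unlock in the reverse order ...
 */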

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	depot_stack_handle_t trace_handle;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
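
/*
 * Illustrative (count, min_count) combinations, following the encoding
 * described above:
 *
 *	min_count == -1			-> black: never scanned or reported
 *	min_count == 0			-> gray: scanned, never reported
 *	min_count == 1, count == 0	-> white: reported as unreferenced
 *	min_count == 1, count >= 1	-> gray: sufficiently referenced
 */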

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       bool is_phys)
{
	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
			     object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, false);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, false);
}

/*
 * Remove an object from the object_tree_root (or object_phys_tree_root)
 * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
 * is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
				   &object_phys_tree_root :
				   &object_tree_root);
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root (or object_phys_tree_root) and object_list. The
 * returned object's use_count should be at least 1, as initially set
 * by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root (or
 * object_phys_tree_root).
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr with object
	 * storing virtual address.
	 */
	if (!is_phys) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = is_phys ? &object_phys_tree_root.rb_node :
			 &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
						    &object_tree_root);
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, false);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, true);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, false);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1, is_phys);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		__create_object(start, ptr - start, object->min_count,
				GFP_KERNEL, is_phys);
	if (ptr + size < end)
		__create_object(ptr + size, end - ptr - size, object->min_count,
				GFP_KERNEL, is_phys);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, bool is_phys)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, is_phys);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, false);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, bool is_phys)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
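
/*
 * Usage sketch (hypothetical driver code, not part of this file): memory
 * obtained outside the instrumented allocators, e.g. straight from
 * alloc_pages(), is invisible to kmemleak unless registered explicitly:
 *
 *	ptr = page_address(alloc_pages(GFP_KERNEL, order));
 *	kmemleak_alloc(ptr, PAGE_SIZE << order, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(ptr);
 *	__free_pages(virt_to_page(ptr), order);
 */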

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, false);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = set_track_prepare();
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and to always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
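
/*
 * Usage sketch (hypothetical; REG_BUF_ADDR and base are made up): an object
 * whose only reference is handed to hardware or some other place kmemleak
 * cannot scan would be reported as a leak, so it is annotated as a false
 * positive:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	writel(virt_to_phys(obj), base + REG_BUF_ADDR);
 *	kmemleak_not_leak(obj);
 */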

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, false);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
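
/*
 * Usage sketch (hypothetical; BUF_SIZE and struct buf_hdr are made up): if
 * only the header of a large buffer holds pointers to other allocations,
 * restricting scanning to that header keeps random payload bytes from being
 * misread as references, which would otherwise hide real leaks:
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	kmemleak_scan_area(buf, sizeof(struct buf_hdr), GFP_KERNEL);
 */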

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
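
/*
 * Usage sketch (hypothetical; early_phys_alloc() is made up): a block handed
 * out by a physical-memory allocator that is not already instrumented can be
 * registered by physical address:
 *
 *	pa = early_phys_alloc(size);
 *	kmemleak_alloc_phys(pa, size, GFP_NOWAIT);
 */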
1216
1217 /**
1218 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1219 * physical address argument
1220 * @phys: physical address if the beginning or inside an object. This
1221 * also represents the start of the range to be freed
1222 * @size: size to be unregistered
1223 */
1224 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1225 {
1226 pr_debug("%s(0x%pa)\n", __func__, &phys);
1227
1228 if (kmemleak_enabled)
1229 delete_object_part((unsigned long)phys, size, true);
1230 }
1231 EXPORT_SYMBOL(kmemleak_free_part_phys);
1232
1233 /**
1234 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1235 * address argument
1236 * @phys: physical address of the object
1237 */
1238 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1239 {
1240 pr_debug("%s(0x%pa)\n", __func__, &phys);
1241
1242 if (kmemleak_enabled)
1243 make_black_object((unsigned long)phys, true);
1244 }
1245 EXPORT_SYMBOL(kmemleak_ignore_phys);
1246
1247 /*
1248 * Update an object's checksum and return true if it was modified.
1249 */
1250 static bool update_checksum(struct kmemleak_object *object)
1251 {
1252 u32 old_csum = object->checksum;
1253
1254 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1255 return false;
1256
1257 kasan_disable_current();
1258 kcsan_disable_current();
1259 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1260 kasan_enable_current();
1261 kcsan_enable_current();
1262
1263 return object->checksum != old_csum;
1264 }
1265
1266 /*
1267 * Update an object's references. object->lock must be held by the caller.
1268 */
1269 static void update_refs(struct kmemleak_object *object)
1270 {
1271 if (!color_white(object)) {
1272 /* non-orphan, ignored or new */
1273 return;
1274 }
1275
1276 /*
1277 * Increase the object's reference count (number of pointers to the
1278 * memory block). If this count reaches the required minimum, the
1279 * object's color will become gray and it will be added to the
1280 * gray_list.
1281 */
1282 object->count++;
1283 if (color_gray(object)) {
1284 /* put_object() called when removing from gray_list */
1285 WARN_ON(!get_object(object));
1286 list_add_tail(&object->gray_list, &gray_list);
1287 }
1288 }
1289
1290 /*
1291 * Memory scanning is a long process and it needs to be interruptible. This
1292 * function checks whether such interrupt condition occurred.
1293 */
1294 static int scan_should_stop(void)
1295 {
1296 if (!kmemleak_enabled)
1297 return 1;
1298
1299 /*
1300 * This function may be called from either process or kthread context,
1301 * hence the need to check for both stop conditions.
1302 */
1303 if (current->mm)
1304 return signal_pending(current);
1305 else
1306 return kthread_should_stop();
1307
1308 return 0;
1309 }
1310
1311 /*
1312 * Scan a memory block (exclusive range) for valid pointers and add those
1313 * found to the gray list.
1314 */
1315 static void scan_block(void *_start, void *_end,
1316 struct kmemleak_object *scanned)
1317 {
1318 unsigned long *ptr;
1319 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1320 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1321 unsigned long flags;
1322 unsigned long untagged_ptr;
1323
1324 raw_spin_lock_irqsave(&kmemleak_lock, flags);
1325 for (ptr = start; ptr < end; ptr++) {
1326 struct kmemleak_object *object;
1327 unsigned long pointer;
1328 unsigned long excess_ref;
1329
1330 if (scan_should_stop())
1331 break;
1332
1333 kasan_disable_current();
1334 pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1335 kasan_enable_current();
1336
1337 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1338 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1339 continue;
1340
1341 /*
1342 * No need for get_object() here since we hold kmemleak_lock.
1343 * object->use_count cannot be dropped to 0 while the object
1344 * is still present in object_tree_root and object_list
1345 * (with updates protected by kmemleak_lock).
1346 */
1347 object = lookup_object(pointer, 1);
1348 if (!object)
1349 continue;
1350 if (object == scanned)
1351 /* self referenced, ignore */
1352 continue;
1353
1354 /*
1355 * Avoid the lockdep recursive warning on object->lock being
1356 * previously acquired in scan_object(). These locks are
1357 * enclosed by scan_mutex.
1358 */
1359 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1360 /* only pass surplus references (object already gray) */
1361 if (color_gray(object)) {
1362 excess_ref = object->excess_ref;
1363 /* no need for update_refs() if object already gray */
1364 } else {
1365 excess_ref = 0;
1366 update_refs(object);
1367 }
1368 raw_spin_unlock(&object->lock);
1369
1370 if (excess_ref) {
1371 object = lookup_object(excess_ref, 0);
1372 if (!object)
1373 continue;
1374 if (object == scanned)
1375 /* circular reference, ignore */
1376 continue;
1377 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1378 update_refs(object);
1379 raw_spin_unlock(&object->lock);
1380 }
1381 }
1382 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1383 }
1384
1385 /*
1386 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1387 */
1388 #ifdef CONFIG_SMP
1389 static void scan_large_block(void *start, void *end)
1390 {
1391 void *next;
1392
1393 while (start < end) {
1394 next = min(start + MAX_SCAN_SIZE, end);
1395 scan_block(start, next, NULL);
1396 start = next;
1397 cond_resched();
1398 }
1399 }
1400 #endif
1401
1402 /*
1403 * Scan a memory block corresponding to a kmemleak_object. A condition is
1404 * that object->use_count >= 1.
1405 */
1406 static void scan_object(struct kmemleak_object *object)
1407 {
1408 struct kmemleak_scan_area *area;
1409 unsigned long flags;
1410 void *obj_ptr;
1411
1412 /*
1413 * Once the object->lock is acquired, the corresponding memory block
1414 * cannot be freed (the same lock is acquired in delete_object).
1415 */
1416 raw_spin_lock_irqsave(&object->lock, flags);
1417 if (object->flags & OBJECT_NO_SCAN)
1418 goto out;
1419 if (!(object->flags & OBJECT_ALLOCATED))
1420 /* already freed object */
1421 goto out;
1422
1423 obj_ptr = object->flags & OBJECT_PHYS ?
1424 __va((phys_addr_t)object->pointer) :
1425 (void *)object->pointer;
1426
1427 if (hlist_empty(&object->area_list) ||
1428 object->flags & OBJECT_FULL_SCAN) {
1429 void *start = obj_ptr;
1430 void *end = obj_ptr + object->size;
1431 void *next;
1432
1433 do {
1434 next = min(start + MAX_SCAN_SIZE, end);
1435 scan_block(start, next, object);
1436
1437 start = next;
1438 if (start >= end)
1439 break;
1440
1441 raw_spin_unlock_irqrestore(&object->lock, flags);
1442 cond_resched();
1443 raw_spin_lock_irqsave(&object->lock, flags);
1444 } while (object->flags & OBJECT_ALLOCATED);
1445 } else
1446 hlist_for_each_entry(area, &object->area_list, node)
1447 scan_block((void *)area->start,
1448 (void *)(area->start + area->size),
1449 object);
1450 out:
1451 raw_spin_unlock_irqrestore(&object->lock, flags);
1452 }
1453
1454 /*
1455 * Scan the objects already referenced (gray objects). More objects will be
1456 * referenced and, if there are no memory leaks, all the objects are scanned.
1457 */
1458 static void scan_gray_list(void)
1459 {
1460 struct kmemleak_object *object, *tmp;
1461
1462 /*
1463 * The list traversal is safe for both tail additions and removals
1464 * from inside the loop. The kmemleak objects cannot be freed from
1465 * outside the loop because their use_count was incremented.
1466 */
1467 object = list_entry(gray_list.next, typeof(*object), gray_list);
1468 while (&object->gray_list != &gray_list) {
1469 cond_resched();
1470
1471 /* may add new objects to the list */
1472 if (!scan_should_stop())
1473 scan_object(object);
1474
1475 tmp = list_entry(object->gray_list.next, typeof(*object),
1476 gray_list);
1477
1478 /* remove the object from the list and release it */
1479 list_del(&object->gray_list);
1480 put_object(object);
1481
1482 object = tmp;
1483 }
1484 WARN_ON(!list_empty(&gray_list));
1485 }
1486
1487 /*
1488 * Conditionally call resched() in an object iteration loop while making sure
1489 * that the given object won't go away without RCU read lock by performing a
1490 * get_object() if necessaary.
1491 */
1492 static void kmemleak_cond_resched(struct kmemleak_object *object)
1493 {
1494 if (!get_object(object))
1495 return; /* Try next object */
1496
1497 raw_spin_lock_irq(&kmemleak_lock);
1498 if (object->del_state & DELSTATE_REMOVED)
1499 goto unlock_put; /* Object removed */
1500 object->del_state |= DELSTATE_NO_DELETE;
1501 raw_spin_unlock_irq(&kmemleak_lock);
1502
1503 rcu_read_unlock();
1504 cond_resched();
1505 rcu_read_lock();
1506
1507 raw_spin_lock_irq(&kmemleak_lock);
1508 if (object->del_state & DELSTATE_REMOVED)
1509 list_del_rcu(&object->object_list);
1510 object->del_state &= ~DELSTATE_NO_DELETE;
1511 unlock_put:
1512 raw_spin_unlock_irq(&kmemleak_lock);
1513 put_object(object);
1514 }
1515
1516 /*
1517 * Scan data sections and all the referenced memory blocks allocated via the
1518 * kernel's standard allocators. This function must be called with the
1519 * scan_mutex held.
1520 */
1521 static void kmemleak_scan(void)
1522 {
1523 struct kmemleak_object *object;
1524 struct zone *zone;
1525 int __maybe_unused i;
1526 int new_leaks = 0;
1527
1528 jiffies_last_scan = jiffies;
1529
1530 /* prepare the kmemleak_object's */
1531 rcu_read_lock();
1532 list_for_each_entry_rcu(object, &object_list, object_list) {
1533 raw_spin_lock_irq(&object->lock);
1534 #ifdef DEBUG
1535 /*
1536 * With a few exceptions there should be a maximum of
1537 * 1 reference to any object at this point.
1538 */
1539 if (atomic_read(&object->use_count) > 1) {
1540 pr_debug("object->use_count = %d\n",
1541 atomic_read(&object->use_count));
1542 dump_object_info(object);
1543 }
1544 #endif
1545
1546 /* ignore objects outside lowmem (paint them black) */
1547 if ((object->flags & OBJECT_PHYS) &&
1548 !(object->flags & OBJECT_NO_SCAN)) {
1549 unsigned long phys = object->pointer;
1550
1551 if (PHYS_PFN(phys) < min_low_pfn ||
1552 PHYS_PFN(phys + object->size) >= max_low_pfn)
1553 __paint_it(object, KMEMLEAK_BLACK);
1554 }
1555
1556 /* reset the reference count (whiten the object) */
1557 object->count = 0;
1558 if (color_gray(object) && get_object(object))
1559 list_add_tail(&object->gray_list, &gray_list);
1560
1561 raw_spin_unlock_irq(&object->lock);
1562
1563 if (need_resched())
1564 kmemleak_cond_resched(object);
1565 }
1566 rcu_read_unlock();
1567
1568 #ifdef CONFIG_SMP
1569 /* per-cpu sections scanning */
1570 for_each_possible_cpu(i)
1571 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1572 __per_cpu_end + per_cpu_offset(i));
1573 #endif
1574
1575 /*
1576 * Struct page scanning for each node.
1577 */
1578 get_online_mems();
1579 for_each_populated_zone(zone) {
1580 unsigned long start_pfn = zone->zone_start_pfn;
1581 unsigned long end_pfn = zone_end_pfn(zone);
1582 unsigned long pfn;
1583
1584 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1585 struct page *page = pfn_to_online_page(pfn);
1586
1587 if (!(pfn & 63))
1588 cond_resched();
1589
1590 if (!page)
1591 continue;
1592
1593 /* only scan pages belonging to this zone */
1594 if (page_zone(page) != zone)
1595 continue;
1596 /* only scan if page is in use */
1597 if (page_count(page) == 0)
1598 continue;
1599 scan_block(page, page + 1, NULL);
1600 }
1601 }
1602 put_online_mems();
1603
1604 /*
1605 * Scanning the task stacks (may introduce false negatives).
1606 */
1607 if (kmemleak_stack_scan) {
1608 struct task_struct *p, *g;
1609
1610 rcu_read_lock();
1611 for_each_process_thread(g, p) {
1612 void *stack = try_get_task_stack(p);
1613 if (stack) {
1614 scan_block(stack, stack + THREAD_SIZE, NULL);
1615 put_task_stack(p);
1616 }
1617 }
1618 rcu_read_unlock();
1619 }
1620
1621 /*
1622 * Scan the objects already referenced from the sections scanned
1623 * above.
1624 */
1625 scan_gray_list();
1626
1627 /*
1628 * Check for new or unreferenced objects modified since the previous
1629 * scan and color them gray until the next scan.
1630 */
1631 rcu_read_lock();
1632 list_for_each_entry_rcu(object, &object_list, object_list) {
1633 if (need_resched())
1634 kmemleak_cond_resched(object);
1635
1636 /*
1637 * This is racy but we can save the overhead of lock/unlock
1638 * calls. The missed objects, if any, should be caught in
1639 * the next scan.
1640 */
1641 if (!color_white(object))
1642 continue;
1643 raw_spin_lock_irq(&object->lock);
1644 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1645 && update_checksum(object) && get_object(object)) {
1646 /* color it gray temporarily */
1647 object->count = object->min_count;
1648 list_add_tail(&object->gray_list, &gray_list);
1649 }
1650 raw_spin_unlock_irq(&object->lock);
1651 }
1652 rcu_read_unlock();
1653
1654 /*
1655 * Re-scan the gray list for modified unreferenced objects.
1656 */
1657 scan_gray_list();
1658
1659 /*
1660 * If scanning was stopped do not report any new unreferenced objects.
1661 */
1662 if (scan_should_stop())
1663 return;
1664
1665 /*
1666 * Scanning result reporting.
1667 */
1668 rcu_read_lock();
1669 list_for_each_entry_rcu(object, &object_list, object_list) {
1670 if (need_resched())
1671 kmemleak_cond_resched(object);
1672
1673 /*
1674 * This is racy but we can save the overhead of lock/unlock
1675 * calls. The missed objects, if any, should be caught in
1676 * the next scan.
1677 */
1678 if (!color_white(object))
1679 continue;
1680 raw_spin_lock_irq(&object->lock);
1681 if (unreferenced_object(object) &&
1682 !(object->flags & OBJECT_REPORTED)) {
1683 object->flags |= OBJECT_REPORTED;
1684
1685 if (kmemleak_verbose)
1686 print_unreferenced(NULL, object);
1687
1688 new_leaks++;
1689 }
1690 raw_spin_unlock_irq(&object->lock);
1691 }
1692 rcu_read_unlock();
1693
1694 if (new_leaks) {
1695 kmemleak_found_leaks = true;
1696
1697 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1698 new_leaks);
1699 }
1700
1701 }
1702
1703 /*
1704 * Thread function performing automatic memory scanning. Unreferenced objects
1705 * at the end of a memory scan are reported but only the first time.
1706 */
1707 static int kmemleak_scan_thread(void *arg)
1708 {
1709 static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1710
1711 pr_info("Automatic memory scanning thread started\n");
1712 set_user_nice(current, 10);
1713
1714 /*
1715 * Wait before the first scan to allow the system to fully initialize.
1716 */
1717 if (first_run) {
1718 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1719 first_run = 0;
1720 while (timeout && !kthread_should_stop())
1721 timeout = schedule_timeout_interruptible(timeout);
1722 }
1723
1724 while (!kthread_should_stop()) {
1725 signed long timeout = READ_ONCE(jiffies_scan_wait);
1726
1727 mutex_lock(&scan_mutex);
1728 kmemleak_scan();
1729 mutex_unlock(&scan_mutex);
1730
1731 /* wait before the next scan */
1732 while (timeout && !kthread_should_stop())
1733 timeout = schedule_timeout_interruptible(timeout);
1734 }
1735
1736 pr_info("Automatic memory scanning thread ended\n");
1737
1738 return 0;
1739 }
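/*
* The scan interval used above defaults to SECS_SCAN_WAIT seconds and
* can be changed while the kernel is running; for example (600 is an
* arbitrary choice of seconds):
*
* # echo scan=600 > /sys/kernel/debug/kmemleak
*
* Writing scan=0 stops the periodic thread, and scan=on/scan=off start
* and stop it explicitly (see kmemleak_write() below).
*/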
1740
1741 /*
1742 * Start the automatic memory scanning thread. This function must be called
1743 * with the scan_mutex held.
1744 */
1745 static void start_scan_thread(void)
1746 {
1747 if (scan_thread)
1748 return;
1749 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1750 if (IS_ERR(scan_thread)) {
1751 pr_warn("Failed to create the scan thread\n");
1752 scan_thread = NULL;
1753 }
1754 }
1755
1756 /*
1757 * Stop the automatic memory scanning thread.
1758 */
1759 static void stop_scan_thread(void)
1760 {
1761 if (scan_thread) {
1762 kthread_stop(scan_thread);
1763 scan_thread = NULL;
1764 }
1765 }
1766
1767 /*
1768 * Iterate over the object_list and return the first valid object at or after
1769 * the required position with its use_count incremented. The scan_mutex and
1770 * RCU read lock taken here are released in kmemleak_seq_stop().
1771 */
1772 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1773 {
1774 struct kmemleak_object *object;
1775 loff_t n = *pos;
1776 int err;
1777
1778 err = mutex_lock_interruptible(&scan_mutex);
1779 if (err < 0)
1780 return ERR_PTR(err);
1781
1782 rcu_read_lock();
1783 list_for_each_entry_rcu(object, &object_list, object_list) {
1784 if (n-- > 0)
1785 continue;
1786 if (get_object(object))
1787 goto out;
1788 }
1789 object = NULL;
1790 out:
1791 return object;
1792 }
1793
1794 /*
1795 * Return the next object in the object_list. The function decrements the
1796 * use_count of the previous object and increments that of the next one.
1797 */
1798 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1799 {
1800 struct kmemleak_object *prev_obj = v;
1801 struct kmemleak_object *next_obj = NULL;
1802 struct kmemleak_object *obj = prev_obj;
1803
1804 ++(*pos);
1805
1806 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1807 if (get_object(obj)) {
1808 next_obj = obj;
1809 break;
1810 }
1811 }
1812
1813 put_object(prev_obj);
1814 return next_obj;
1815 }
1816
1817 /*
1818 * Decrement the use_count of the last object returned, if any.
1819 */
1820 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1821 {
1822 if (!IS_ERR(v)) {
1823 /*
1824 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1825 * waiting was interrupted, so only release it if !IS_ERR.
1826 */
1827 rcu_read_unlock();
1828 mutex_unlock(&scan_mutex);
1829 if (v)
1830 put_object(v);
1831 }
1832 }
1833
1834 /*
1835 * Print the information for an unreferenced object to the seq file.
1836 */
1837 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1838 {
1839 struct kmemleak_object *object = v;
1840 unsigned long flags;
1841
1842 raw_spin_lock_irqsave(&object->lock, flags);
1843 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1844 print_unreferenced(seq, object);
1845 raw_spin_unlock_irqrestore(&object->lock, flags);
1846 return 0;
1847 }
1848
1849 static const struct seq_operations kmemleak_seq_ops = {
1850 .start = kmemleak_seq_start,
1851 .next = kmemleak_seq_next,
1852 .stop = kmemleak_seq_stop,
1853 .show = kmemleak_seq_show,
1854 };
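/*
* The operations above follow the usual seq_file contract when the
* debugfs file is read: ->start() positions the iterator (taking
* scan_mutex and the RCU read lock here), ->show() and ->next()
* alternate for each object, and ->stop() releases whatever ->start()
* acquired. This is why the locks taken in kmemleak_seq_start() are
* only dropped in kmemleak_seq_stop().
*/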
1855
1856 static int kmemleak_open(struct inode *inode, struct file *file)
1857 {
1858 return seq_open(file, &kmemleak_seq_ops);
1859 }
1860
1861 static int dump_str_object_info(const char *str)
1862 {
1863 unsigned long flags;
1864 struct kmemleak_object *object;
1865 unsigned long addr;
1866
1867 if (kstrtoul(str, 0, &addr))
1868 return -EINVAL;
1869 object = find_and_get_object(addr, 0);
1870 if (!object) {
1871 pr_info("Unknown object at 0x%08lx\n", addr);
1872 return -EINVAL;
1873 }
1874
1875 raw_spin_lock_irqsave(&object->lock, flags);
1876 dump_object_info(object);
1877 raw_spin_unlock_irqrestore(&object->lock, flags);
1878
1879 put_object(object);
1880 return 0;
1881 }
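/*
* A minimal sketch of driving this helper from user space (the address
* below is purely illustrative; use a pointer value reported by a
* previous scan):
*
* # echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
*
* The object details are printed to the kernel log; an unknown address
* fails with -EINVAL.
*/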
1882
1883 /*
1884 * We use grey instead of black so that future scans can still cover the
1885 * same objects. If they were painted black, the scanner would skip them;
1886 * references they hold to newly allocated objects would then go unnoticed
1887 * and we would end up with false positives.
1888 */
1889 static void kmemleak_clear(void)
1890 {
1891 struct kmemleak_object *object;
1892
1893 rcu_read_lock();
1894 list_for_each_entry_rcu(object, &object_list, object_list) {
1895 raw_spin_lock_irq(&object->lock);
1896 if ((object->flags & OBJECT_REPORTED) &&
1897 unreferenced_object(object))
1898 __paint_it(object, KMEMLEAK_GREY);
1899 raw_spin_unlock_irq(&object->lock);
1900 }
1901 rcu_read_unlock();
1902
1903 kmemleak_found_leaks = false;
1904 }
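/*
* In practice this backs the "clear" command: previously reported
* objects are painted grey and silenced, while genuinely new leaks are
* still caught by subsequent scans, e.g.:
*
* # echo clear > /sys/kernel/debug/kmemleak
* # echo scan > /sys/kernel/debug/kmemleak
* # cat /sys/kernel/debug/kmemleak
*/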
1905
1906 static void __kmemleak_do_cleanup(void);
1907
1908 /*
1909 * File write operation to configure kmemleak at run-time. The following
1910 * commands can be written to the /sys/kernel/debug/kmemleak file:
1911 * off - disable kmemleak (irreversible)
1912 * stack=on - enable the task stacks scanning
1913 * stack=off - disable the task stacks scanning
1914 * scan=on - start the automatic memory scanning thread
1915 * scan=off - stop the automatic memory scanning thread
1916 * scan=... - set the automatic memory scanning period in seconds (0 to
1917 * disable it)
1918 * scan - trigger a memory scan
1919 * clear - mark all currently reported unreferenced kmemleak objects as
1920 * grey so that they are no longer printed, or free all kmemleak
1921 * objects if kmemleak has been disabled.
1922 * dump=... - dump information about the object found at the given address
1923 */
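/*
* Note that each write is handled as a single command: the input is
* truncated to fit the 64-byte buffer, anything beyond the recognised
* command is ignored and, once kmemleak has been disabled, every
* command other than "clear" fails with -EPERM. A sketch of a typical
* sequence:
*
* # echo scan > /sys/kernel/debug/kmemleak
* # cat /sys/kernel/debug/kmemleak
*/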
1924 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1925 size_t size, loff_t *ppos)
1926 {
1927 char buf[64];
1928 int buf_size;
1929 int ret;
1930
1931 buf_size = min(size, (sizeof(buf) - 1));
1932 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1933 return -EFAULT;
1934 buf[buf_size] = 0;
1935
1936 ret = mutex_lock_interruptible(&scan_mutex);
1937 if (ret < 0)
1938 return ret;
1939
1940 if (strncmp(buf, "clear", 5) == 0) {
1941 if (kmemleak_enabled)
1942 kmemleak_clear();
1943 else
1944 __kmemleak_do_cleanup();
1945 goto out;
1946 }
1947
1948 if (!kmemleak_enabled) {
1949 ret = -EPERM;
1950 goto out;
1951 }
1952
1953 if (strncmp(buf, "off", 3) == 0)
1954 kmemleak_disable();
1955 else if (strncmp(buf, "stack=on", 8) == 0)
1956 kmemleak_stack_scan = 1;
1957 else if (strncmp(buf, "stack=off", 9) == 0)
1958 kmemleak_stack_scan = 0;
1959 else if (strncmp(buf, "scan=on", 7) == 0)
1960 start_scan_thread();
1961 else if (strncmp(buf, "scan=off", 8) == 0)
1962 stop_scan_thread();
1963 else if (strncmp(buf, "scan=", 5) == 0) {
1964 unsigned secs;
1965 unsigned long msecs;
1966
1967 ret = kstrtouint(buf + 5, 0, &secs);
1968 if (ret < 0)
1969 goto out;
1970
1971 msecs = secs * MSEC_PER_SEC;
1972 if (msecs > UINT_MAX)
1973 msecs = UINT_MAX;
1974
1975 stop_scan_thread();
1976 if (msecs) {
1977 WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
1978 start_scan_thread();
1979 }
1980 } else if (strncmp(buf, "scan", 4) == 0)
1981 kmemleak_scan();
1982 else if (strncmp(buf, "dump=", 5) == 0)
1983 ret = dump_str_object_info(buf + 5);
1984 else
1985 ret = -EINVAL;
1986
1987 out:
1988 mutex_unlock(&scan_mutex);
1989 if (ret < 0)
1990 return ret;
1991
1992 /* ignore the rest of the buffer, only one command at a time */
1993 *ppos += size;
1994 return size;
1995 }
1996
1997 static const struct file_operations kmemleak_fops = {
1998 .owner = THIS_MODULE,
1999 .open = kmemleak_open,
2000 .read = seq_read,
2001 .write = kmemleak_write,
2002 .llseek = seq_lseek,
2003 .release = seq_release,
2004 };
2005
2006 static void __kmemleak_do_cleanup(void)
2007 {
2008 struct kmemleak_object *object, *tmp;
2009
2010 /*
2011 * Kmemleak has already been disabled, so there is no need for RCU list
2012 * traversal or for holding kmemleak_lock.
2013 */
2014 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2015 __remove_object(object);
2016 __delete_object(object);
2017 }
2018 }
2019
2020 /*
2021 * Stop the memory scanning thread and free the kmemleak internal objects,
2022 * but only if no memory leaks were found (otherwise, kmemleak may still
2023 * hold useful information about the leaks).
2024 */
2025 static void kmemleak_do_cleanup(struct work_struct *work)
2026 {
2027 stop_scan_thread();
2028
2029 mutex_lock(&scan_mutex);
2030 /*
2031 * Once the scan thread is confirmed to have stopped, it is safe to stop
2032 * tracking object freeing. The ordering between the scan thread stopping
2033 * and the memory accesses below is guaranteed by the kthread_stop()
2034 * function.
2035 */
2036 kmemleak_free_enabled = 0;
2037 mutex_unlock(&scan_mutex);
2038
2039 if (!kmemleak_found_leaks)
2040 __kmemleak_do_cleanup();
2041 else
2042 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2043 }
2044
2045 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2046
2047 /*
2048 * Disable kmemleak. No memory allocation/freeing will be traced once this
2049 * function is called. Disabling kmemleak is an irreversible operation.
2050 */
2051 static void kmemleak_disable(void)
2052 {
2053 /* atomically check whether it was already invoked */
2054 if (cmpxchg(&kmemleak_error, 0, 1))
2055 return;
2056
2057 /* stop any memory operation tracing */
2058 kmemleak_enabled = 0;
2059
2060 /* check whether it is too early for a kernel thread */
2061 if (kmemleak_late_initialized)
2062 schedule_work(&cleanup_work);
2063 else
2064 kmemleak_free_enabled = 0;
2065
2066 pr_info("Kernel memory leak detector disabled\n");
2067 }
2068
2069 /*
2070 * Allow boot-time kmemleak disabling (enabled by default).
2071 */
2072 static int __init kmemleak_boot_config(char *str)
2073 {
2074 if (!str)
2075 return -EINVAL;
2076 if (strcmp(str, "off") == 0)
2077 kmemleak_disable();
2078 else if (strcmp(str, "on") == 0) {
2079 kmemleak_skip_disable = 1;
2080 stack_depot_request_early_init();
2081 } else
2083 return -EINVAL;
2084 return 0;
2085 }
2086 early_param("kmemleak", kmemleak_boot_config);
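/*
* For example, booting with "kmemleak=off" on the kernel command line
* disables the detector before any allocation tracing starts, while
* "kmemleak=on" overrides CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see
* kmemleak_init() below) and requests early stack depot initialisation
* (used for the object backtraces).
*/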
2087
2088 /*
2089 * Kmemleak initialization.
2090 */
2091 void __init kmemleak_init(void)
2092 {
2093 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2094 if (!kmemleak_skip_disable) {
2095 kmemleak_disable();
2096 return;
2097 }
2098 #endif
2099
2100 if (kmemleak_error)
2101 return;
2102
2103 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2104 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2105
2106 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2107 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2108
2109 /* register the data/bss sections */
2110 create_object((unsigned long)_sdata, _edata - _sdata,
2111 KMEMLEAK_GREY, GFP_ATOMIC);
2112 create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2113 KMEMLEAK_GREY, GFP_ATOMIC);
2114 /* only register .data..ro_after_init if not within .data */
2115 if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2116 create_object((unsigned long)__start_ro_after_init,
2117 __end_ro_after_init - __start_ro_after_init,
2118 KMEMLEAK_GREY, GFP_ATOMIC);
2119 }
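/*
* Registering the data/bss sections with KMEMLEAK_GREY above means they
* are always scanned for pointers but are never themselves reported:
* this is how objects whose only reference lives in a static variable
* stay off the leak report.
*/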
2120
2121 /*
2122 * Late initialization function.
2123 */
2124 static int __init kmemleak_late_init(void)
2125 {
2126 kmemleak_late_initialized = 1;
2127
2128 debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2129
2130 if (kmemleak_error) {
2131 /*
2132 * An error occurred and kmemleak was disabled. There is a small
2133 * chance that kmemleak_disable() was called immediately after
2134 * kmemleak_late_initialized was set, in which case we may end up
2135 * with two clean-up work items, serialized by scan_mutex.
2136 */
2137 schedule_work(&cleanup_work);
2138 return -ENOMEM;
2139 }
2140
2141 if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2142 mutex_lock(&scan_mutex);
2143 start_scan_thread();
2144 mutex_unlock(&scan_mutex);
2145 }
2146
2147 pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2148 mem_pool_free_count);
2149
2150 return 0;
2151 }
2152 late_initcall(kmemleak_late_init);
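/*
* Putting it all together, a typical way to exercise kmemleak once the
* kernel has booted (see Documentation/dev-tools/kmemleak.rst for the
* full details) is roughly:
*
* # mount -t debugfs nodev /sys/kernel/debug/
* # echo scan > /sys/kernel/debug/kmemleak
* # cat /sys/kernel/debug/kmemleak
*/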