/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

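/*
 * Illustrative note (not in the original sources): gfp_kmemleak_mask() keeps
 * only the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags and adds
 * __GFP_NORETRY, __GFP_NOMEMALLOC and __GFP_NOWARN so that kmemleak's own
 * metadata allocations do not retry hard, do not dip into emergency reserves
 * and do not warn on failure. For example, assuming a caller passed
 * GFP_KERNEL | __GFP_ZERO:
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 */
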
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	int min_count;			/* minimum reference count */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

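/*
 * Illustrative example (not in the original sources): an object registered
 * via kmemleak_alloc(ptr, size, 1, gfp) has min_count == 1. If a scan finds
 * no pointers to it, its count stays 0 and the object is white (a leak
 * candidate); once at least one pointer is found, count >= min_count and the
 * object turns gray. An object registered with min_count == 0 is gray from
 * the start and is never reported, while min_count == -1 (KMEMLEAK_BLACK)
 * marks it as ignored altogether.
 */
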
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

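/*
 * Illustrative sketch of the report layout produced by the format strings
 * above (the addresses, pid, size and symbols below are made up):
 *
 *	unreferenced object 0xffff880066f2b000 (size 64):
 *	  comm "modprobe", pid 732, jiffies 4294899245 (age 124.320s)
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace:
 *	    [<ffffffff811a3a4c>] kmem_cache_alloc+0x9c/0x1a0
 *	    ...
 */
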
/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace for each
 * possible CPU.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

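/*
 * Illustrative sketch (not part of this file): a custom allocator that hands
 * out blocks carved from a larger pool can keep kmemleak informed by pairing
 * these hooks around its own alloc/free paths, e.g.:
 *
 *	obj = my_pool_alloc(pool, len);		// hypothetical pool allocator
 *	if (obj)
 *		kmemleak_alloc(obj, len, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(obj);
 *	my_pool_free(pool, obj);
 *
 * The standard slab/vmalloc/percpu allocators already call these hooks, so
 * ordinary kmalloc()/kfree() users do not need to do anything.
 */
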
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

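/*
 * Illustrative sketch (not part of this file): for a large object where only
 * a small header holds pointers to other allocations, restricting the scan
 * avoids misinterpreting raw payload data as pointers (which could hide real
 * leaks) and also cuts scanning time, e.g.:
 *
 *	struct big_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL);  // hypothetical type
 *	// only ->hdr contains pointers; the payload is raw data
 *	kmemleak_scan_area(&buf->hdr, sizeof(buf->hdr), GFP_KERNEL);
 *
 * Passing SIZE_MAX as @size extends the area to the end of the object, as
 * handled in add_scan_area() above.
 */
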
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);
	scan_large_block(__start_ro_after_init, __end_ro_after_init);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

1598/*
1599 * Iterate over the object_list and return the first valid object at or after
1600 * the required position with its use_count incremented. The function triggers
1601 * a memory scanning when the pos argument points to the first position.
1602 */
1603static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1604{
1605 struct kmemleak_object *object;
1606 loff_t n = *pos;
b87324d0
CM
1607 int err;
1608
1609 err = mutex_lock_interruptible(&scan_mutex);
1610 if (err < 0)
1611 return ERR_PTR(err);
3c7b4e6b 1612
3c7b4e6b
CM
1613 rcu_read_lock();
1614 list_for_each_entry_rcu(object, &object_list, object_list) {
1615 if (n-- > 0)
1616 continue;
1617 if (get_object(object))
1618 goto out;
1619 }
1620 object = NULL;
1621out:
3c7b4e6b
CM
1622 return object;
1623}
1624
1625/*
1626 * Return the next object in the object_list. The function decrements the
1627 * use_count of the previous object and increases that of the next one.
1628 */
1629static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1630{
1631 struct kmemleak_object *prev_obj = v;
1632 struct kmemleak_object *next_obj = NULL;
58fac095 1633 struct kmemleak_object *obj = prev_obj;
3c7b4e6b
CM
1634
1635 ++(*pos);
3c7b4e6b 1636
58fac095 1637 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
52c3ce4e
CM
1638 if (get_object(obj)) {
1639 next_obj = obj;
3c7b4e6b 1640 break;
52c3ce4e 1641 }
3c7b4e6b 1642 }
288c857d 1643
3c7b4e6b
CM
1644 put_object(prev_obj);
1645 return next_obj;
1646}
1647
1648/*
 1649 * Decrement the use_count of the last object returned, if any.
1650 */
1651static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1652{
b87324d0
CM
1653 if (!IS_ERR(v)) {
1654 /*
1655 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1656 * waiting was interrupted, so only release it if !IS_ERR.
1657 */
f5886c7f 1658 rcu_read_unlock();
b87324d0
CM
1659 mutex_unlock(&scan_mutex);
1660 if (v)
1661 put_object(v);
1662 }
3c7b4e6b
CM
1663}
1664
1665/*
1666 * Print the information for an unreferenced object to the seq file.
1667 */
1668static int kmemleak_seq_show(struct seq_file *seq, void *v)
1669{
1670 struct kmemleak_object *object = v;
1671 unsigned long flags;
1672
1673 spin_lock_irqsave(&object->lock, flags);
288c857d 1674 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
17bb9e0d 1675 print_unreferenced(seq, object);
3c7b4e6b
CM
1676 spin_unlock_irqrestore(&object->lock, flags);
1677 return 0;
1678}
1679
1680static const struct seq_operations kmemleak_seq_ops = {
1681 .start = kmemleak_seq_start,
1682 .next = kmemleak_seq_next,
1683 .stop = kmemleak_seq_stop,
1684 .show = kmemleak_seq_show,
1685};
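/*
 * Editorial sketch, not part of kmemleak.c: the seq_file core drives the
 * iterator above roughly as below (buffer management omitted), which is why
 * kmemleak_seq_start() takes scan_mutex and the RCU read lock, each
 * kmemleak_seq_next() call hands the use_count reference from one object to
 * the next, and kmemleak_seq_stop() drops the final reference and the locks:
 *
 *	loff_t pos = 0;
 *	void *v = kmemleak_seq_ops.start(seq, &pos);
 *
 *	while (v && !IS_ERR(v)) {
 *		kmemleak_seq_ops.show(seq, v);
 *		v = kmemleak_seq_ops.next(seq, v, &pos);
 *	}
 *	kmemleak_seq_ops.stop(seq, v);
 */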
1686
1687static int kmemleak_open(struct inode *inode, struct file *file)
1688{
b87324d0 1689 return seq_open(file, &kmemleak_seq_ops);
3c7b4e6b
CM
1690}
1691
189d84ed
CM
1692static int dump_str_object_info(const char *str)
1693{
1694 unsigned long flags;
1695 struct kmemleak_object *object;
1696 unsigned long addr;
1697
dc053733
AP
1698 if (kstrtoul(str, 0, &addr))
1699 return -EINVAL;
189d84ed
CM
1700 object = find_and_get_object(addr, 0);
1701 if (!object) {
1702 pr_info("Unknown object at 0x%08lx\n", addr);
1703 return -EINVAL;
1704 }
1705
1706 spin_lock_irqsave(&object->lock, flags);
1707 dump_object_info(object);
1708 spin_unlock_irqrestore(&object->lock, flags);
1709
1710 put_object(object);
1711 return 0;
1712}
1713
30b37101
LR
1714/*
 1715 * We use grey instead of black to ensure we can do future scans on the same
 1716 * objects. If we did not scan them again, these black objects could hold
 1717 * references to newly allocated objects that the scanner would never see,
 1718 * and we would end up with false positives.
1719 */
1720static void kmemleak_clear(void)
1721{
1722 struct kmemleak_object *object;
1723 unsigned long flags;
1724
1725 rcu_read_lock();
1726 list_for_each_entry_rcu(object, &object_list, object_list) {
1727 spin_lock_irqsave(&object->lock, flags);
1728 if ((object->flags & OBJECT_REPORTED) &&
1729 unreferenced_object(object))
a1084c87 1730 __paint_it(object, KMEMLEAK_GREY);
30b37101
LR
1731 spin_unlock_irqrestore(&object->lock, flags);
1732 }
1733 rcu_read_unlock();
dc9b3f42
LZ
1734
1735 kmemleak_found_leaks = false;
30b37101
LR
1736}
1737
c89da70c
LZ
1738static void __kmemleak_do_cleanup(void);
1739
3c7b4e6b
CM
1740/*
1741 * File write operation to configure kmemleak at run-time. The following
1742 * commands can be written to the /sys/kernel/debug/kmemleak file:
1743 * off - disable kmemleak (irreversible)
 1744 * stack=on - enable scanning of the task stacks
 1745 * stack=off - disable scanning of the task stacks
1746 * scan=on - start the automatic memory scanning thread
1747 * scan=off - stop the automatic memory scanning thread
1748 * scan=... - set the automatic memory scanning period in seconds (0 to
1749 * disable it)
4698c1f2 1750 * scan - trigger a memory scan
30b37101 1751 * clear - mark all currently reported unreferenced kmemleak objects as
c89da70c
LZ
 1752 * grey so that they are no longer reported, or free all kmemleak objects
1753 * if kmemleak has been disabled.
189d84ed 1754 * dump=... - dump information about the object found at the given address
3c7b4e6b
CM
1755 */
1756static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1757 size_t size, loff_t *ppos)
1758{
1759 char buf[64];
1760 int buf_size;
b87324d0 1761 int ret;
3c7b4e6b
CM
1762
1763 buf_size = min(size, (sizeof(buf) - 1));
1764 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1765 return -EFAULT;
1766 buf[buf_size] = 0;
1767
b87324d0
CM
1768 ret = mutex_lock_interruptible(&scan_mutex);
1769 if (ret < 0)
1770 return ret;
1771
c89da70c 1772 if (strncmp(buf, "clear", 5) == 0) {
8910ae89 1773 if (kmemleak_enabled)
c89da70c
LZ
1774 kmemleak_clear();
1775 else
1776 __kmemleak_do_cleanup();
1777 goto out;
1778 }
1779
8910ae89 1780 if (!kmemleak_enabled) {
c89da70c
LZ
1781 ret = -EBUSY;
1782 goto out;
1783 }
1784
3c7b4e6b
CM
1785 if (strncmp(buf, "off", 3) == 0)
1786 kmemleak_disable();
1787 else if (strncmp(buf, "stack=on", 8) == 0)
1788 kmemleak_stack_scan = 1;
1789 else if (strncmp(buf, "stack=off", 9) == 0)
1790 kmemleak_stack_scan = 0;
1791 else if (strncmp(buf, "scan=on", 7) == 0)
1792 start_scan_thread();
1793 else if (strncmp(buf, "scan=off", 8) == 0)
1794 stop_scan_thread();
1795 else if (strncmp(buf, "scan=", 5) == 0) {
1796 unsigned long secs;
3c7b4e6b 1797
3dbb95f7 1798 ret = kstrtoul(buf + 5, 0, &secs);
b87324d0
CM
1799 if (ret < 0)
1800 goto out;
3c7b4e6b
CM
1801 stop_scan_thread();
1802 if (secs) {
1803 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1804 start_scan_thread();
1805 }
4698c1f2
CM
1806 } else if (strncmp(buf, "scan", 4) == 0)
1807 kmemleak_scan();
189d84ed
CM
1808 else if (strncmp(buf, "dump=", 5) == 0)
1809 ret = dump_str_object_info(buf + 5);
4698c1f2 1810 else
b87324d0
CM
1811 ret = -EINVAL;
1812
1813out:
1814 mutex_unlock(&scan_mutex);
1815 if (ret < 0)
1816 return ret;
3c7b4e6b
CM
1817
1818 /* ignore the rest of the buffer, only one command at a time */
1819 *ppos += size;
1820 return size;
1821}
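/*
 * Editorial sketch, not part of kmemleak.c: a minimal user-space program
 * illustrating how the commands documented above are driven through the
 * debugfs file. It assumes debugfs is mounted at /sys/kernel/debug and that
 * the kernel was built with CONFIG_DEBUG_KMEMLEAK=y; the helper names are
 * invented for this example.
 */
#include <stdio.h>

#define KMEMLEAK_PATH "/sys/kernel/debug/kmemleak"

/* Write a single command, e.g. "scan", "clear", "scan=600" or "dump=0x...". */
static int kmemleak_command(const char *cmd)
{
	FILE *f = fopen(KMEMLEAK_PATH, "w");

	if (!f)
		return -1;
	if (fputs(cmd, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

/* Dump the currently reported unreferenced objects to stdout. */
static void kmemleak_report(void)
{
	char line[256];
	FILE *f = fopen(KMEMLEAK_PATH, "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	if (kmemleak_command("scan"))	/* trigger an immediate scan */
		perror(KMEMLEAK_PATH);
	kmemleak_report();		/* print any "unreferenced object" records */
	return 0;
}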
1822
1823static const struct file_operations kmemleak_fops = {
1824 .owner = THIS_MODULE,
1825 .open = kmemleak_open,
1826 .read = seq_read,
1827 .write = kmemleak_write,
1828 .llseek = seq_lseek,
5f3bf19a 1829 .release = seq_release,
3c7b4e6b
CM
1830};
1831
c89da70c
LZ
1832static void __kmemleak_do_cleanup(void)
1833{
1834 struct kmemleak_object *object;
1835
1836 rcu_read_lock();
1837 list_for_each_entry_rcu(object, &object_list, object_list)
1838 delete_object_full(object->pointer);
1839 rcu_read_unlock();
1840}
1841
3c7b4e6b 1842/*
74341703
CM
 1843 * Stop the memory scanning thread and free the kmemleak internal objects if
 1844 * no memory leaks were reported (otherwise, kmemleak may still have some
 1845 * useful information on memory leaks).
3c7b4e6b 1846 */
179a8100 1847static void kmemleak_do_cleanup(struct work_struct *work)
3c7b4e6b 1848{
3c7b4e6b 1849 stop_scan_thread();
3c7b4e6b 1850
c5f3b1a5
CM
1851 /*
1852 * Once the scan thread has stopped, it is safe to no longer track
1853 * object freeing. Ordering of the scan thread stopping and the memory
1854 * accesses below is guaranteed by the kthread_stop() function.
1855 */
1856 kmemleak_free_enabled = 0;
1857
c89da70c
LZ
1858 if (!kmemleak_found_leaks)
1859 __kmemleak_do_cleanup();
1860 else
756a025f 1861 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
3c7b4e6b
CM
1862}
1863
179a8100 1864static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
3c7b4e6b
CM
1865
1866/*
1867 * Disable kmemleak. No memory allocation/freeing will be traced once this
1868 * function is called. Disabling kmemleak is an irreversible operation.
1869 */
1870static void kmemleak_disable(void)
1871{
1872 /* atomically check whether it was already invoked */
8910ae89 1873 if (cmpxchg(&kmemleak_error, 0, 1))
3c7b4e6b
CM
1874 return;
1875
1876 /* stop any memory operation tracing */
8910ae89 1877 kmemleak_enabled = 0;
3c7b4e6b
CM
1878
1879 /* check whether it is too early for a kernel thread */
8910ae89 1880 if (kmemleak_initialized)
179a8100 1881 schedule_work(&cleanup_work);
c5f3b1a5
CM
1882 else
1883 kmemleak_free_enabled = 0;
3c7b4e6b
CM
1884
1885 pr_info("Kernel memory leak detector disabled\n");
1886}
1887
1888/*
1889 * Allow boot-time kmemleak disabling (enabled by default).
1890 */
1891static int kmemleak_boot_config(char *str)
1892{
1893 if (!str)
1894 return -EINVAL;
1895 if (strcmp(str, "off") == 0)
1896 kmemleak_disable();
ab0155a2
JB
1897 else if (strcmp(str, "on") == 0)
1898 kmemleak_skip_disable = 1;
1899 else
3c7b4e6b
CM
1900 return -EINVAL;
1901 return 0;
1902}
1903early_param("kmemleak", kmemleak_boot_config);
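/*
 * Editorial note: for example, booting with "kmemleak=off" on the kernel
 * command line disables the detector from the start, while kernels built
 * with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y require "kmemleak=on" to enable
 * it (see kmemleak_init() below).
 */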
1904
5f79020c
CM
1905static void __init print_log_trace(struct early_log *log)
1906{
1907 struct stack_trace trace;
1908
1909 trace.nr_entries = log->trace_len;
1910 trace.entries = log->trace;
1911
1912 pr_notice("Early log backtrace:\n");
1913 print_stack_trace(&trace, 2);
1914}
1915
3c7b4e6b 1916/*
2030117d 1917 * Kmemleak initialization.
3c7b4e6b
CM
1918 */
1919void __init kmemleak_init(void)
1920{
1921 int i;
1922 unsigned long flags;
1923
ab0155a2
JB
1924#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1925 if (!kmemleak_skip_disable) {
3551a928 1926 kmemleak_early_log = 0;
ab0155a2
JB
1927 kmemleak_disable();
1928 return;
1929 }
1930#endif
1931
3c7b4e6b
CM
1932 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1933 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1934
1935 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1936 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
3c7b4e6b 1937
21cd3a60 1938 if (crt_early_log > ARRAY_SIZE(early_log))
598d8091
JP
1939 pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
1940 crt_early_log);
b6693005 1941
3c7b4e6b
CM
1942 /* the kernel is still in UP mode, so disabling the IRQs is enough */
1943 local_irq_save(flags);
3551a928 1944 kmemleak_early_log = 0;
8910ae89 1945 if (kmemleak_error) {
b6693005
CM
1946 local_irq_restore(flags);
1947 return;
c5f3b1a5 1948 } else {
8910ae89 1949 kmemleak_enabled = 1;
c5f3b1a5
CM
1950 kmemleak_free_enabled = 1;
1951 }
3c7b4e6b
CM
1952 local_irq_restore(flags);
1953
1954 /*
1955 * This is the point where tracking allocations is safe. Automatic
1956 * scanning is started during the late initcall. Add the early logged
1957 * callbacks to the kmemleak infrastructure.
1958 */
1959 for (i = 0; i < crt_early_log; i++) {
1960 struct early_log *log = &early_log[i];
1961
1962 switch (log->op_type) {
1963 case KMEMLEAK_ALLOC:
fd678967 1964 early_alloc(log);
3c7b4e6b 1965 break;
f528f0b8
CM
1966 case KMEMLEAK_ALLOC_PERCPU:
1967 early_alloc_percpu(log);
1968 break;
3c7b4e6b
CM
1969 case KMEMLEAK_FREE:
1970 kmemleak_free(log->ptr);
1971 break;
53238a60
CM
1972 case KMEMLEAK_FREE_PART:
1973 kmemleak_free_part(log->ptr, log->size);
1974 break;
f528f0b8
CM
1975 case KMEMLEAK_FREE_PERCPU:
1976 kmemleak_free_percpu(log->ptr);
1977 break;
3c7b4e6b
CM
1978 case KMEMLEAK_NOT_LEAK:
1979 kmemleak_not_leak(log->ptr);
1980 break;
1981 case KMEMLEAK_IGNORE:
1982 kmemleak_ignore(log->ptr);
1983 break;
1984 case KMEMLEAK_SCAN_AREA:
c017b4be 1985 kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
3c7b4e6b
CM
1986 break;
1987 case KMEMLEAK_NO_SCAN:
1988 kmemleak_no_scan(log->ptr);
1989 break;
1990 default:
5f79020c
CM
1991 kmemleak_warn("Unknown early log operation: %d\n",
1992 log->op_type);
1993 }
1994
8910ae89 1995 if (kmemleak_warning) {
5f79020c 1996 print_log_trace(log);
8910ae89 1997 kmemleak_warning = 0;
3c7b4e6b
CM
1998 }
1999 }
2000}
2001
2002/*
2003 * Late initialization function.
2004 */
2005static int __init kmemleak_late_init(void)
2006{
2007 struct dentry *dentry;
2008
8910ae89 2009 kmemleak_initialized = 1;
3c7b4e6b 2010
8910ae89 2011 if (kmemleak_error) {
3c7b4e6b 2012 /*
25985edc 2013 * Some error occurred and kmemleak was disabled. There is a
3c7b4e6b
CM
2014 * small chance that kmemleak_disable() was called immediately
2015 * after setting kmemleak_initialized and we may end up with
2016 * two clean-up threads but serialized by scan_mutex.
2017 */
179a8100 2018 schedule_work(&cleanup_work);
3c7b4e6b
CM
2019 return -ENOMEM;
2020 }
2021
2022 dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
2023 &kmemleak_fops);
2024 if (!dentry)
598d8091 2025 pr_warn("Failed to create the debugfs kmemleak file\n");
4698c1f2 2026 mutex_lock(&scan_mutex);
3c7b4e6b 2027 start_scan_thread();
4698c1f2 2028 mutex_unlock(&scan_mutex);
3c7b4e6b
CM
2029
2030 pr_info("Kernel memory leak detector initialized\n");
2031
2032 return 0;
2033}
2034late_initcall(kmemleak_late_init);