1/*
2 * mm/kmemleak.c
3 *
4 * Copyright (C) 2008 ARM Limited
5 * Written by Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 *
21 * For more information on the algorithm and kmemleak usage, please see
22 * Documentation/dev-tools/kmemleak.rst.
23 *
24 * Notes on locking
25 * ----------------
26 *
27 * The following locks and mutexes are used by kmemleak:
28 *
29 * - kmemleak_lock (rwlock): protects the object_list modifications and
30 * accesses to the object_tree_root. The object_list is the main list
31 * holding the metadata (struct kmemleak_object) for the allocated memory
32 * blocks. The object_tree_root is a red black tree used to look-up
33 * metadata based on a pointer to the corresponding memory block. The
34 * kmemleak_object structures are added to the object_list and
35 * object_tree_root in the create_object() function called from the
36 * kmemleak_alloc() callback and removed in delete_object() called from the
37 * kmemleak_free() callback
38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39 * the metadata (e.g. count) are protected by this lock. Note that some
40 * members of this structure may be protected by other means (atomic or
41 * kmemleak_lock). This lock is also held when scanning the corresponding
42 * memory block to avoid the kernel freeing it via the kmemleak_free()
43 * callback. This is less heavyweight than holding a global lock like
44 * kmemleak_lock during scanning
45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46 * unreferenced objects at a time. The gray_list contains the objects which
47 * are already referenced or marked as false positives and need to be
48 * scanned. This list is only modified during a scanning episode when the
49 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
50 * Note that the kmemleak_object.use_count is incremented when an object is
51 * added to the gray_list and therefore cannot be freed. This mutex also
52 * prevents multiple users of the "kmemleak" debugfs file together with
53 * modifications to the memory scanning parameters including the scan_thread
54 * pointer
55 *
56 * Locks and mutexes are acquired/nested in the following order:
57 *
58 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
59 *
60 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
61 * regions.
62 *
63 * The kmemleak_object structures have a use_count incremented or decremented
64 * using the get_object()/put_object() functions. When the use_count becomes
65 * 0, this count can no longer be incremented and put_object() schedules the
66 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
67 * function must be protected by rcu_read_lock() to avoid accessing a freed
68 * structure.
69 */
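/*
 * Illustrative sketch of the use_count/RCU rules above (a minimal example
 * only; find_and_get_object() below is the real implementation):
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))
 *		object = NULL;		(use_count had already dropped to 0)
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);	(use_count 0 -> freeing via RCU callback)
 */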
70
71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
73#include <linux/init.h>
74#include <linux/kernel.h>
75#include <linux/list.h>
76#include <linux/sched/signal.h>
77#include <linux/sched/task.h>
78#include <linux/sched/task_stack.h>
79#include <linux/jiffies.h>
80#include <linux/delay.h>
81#include <linux/export.h>
82#include <linux/kthread.h>
83#include <linux/rbtree.h>
84#include <linux/fs.h>
85#include <linux/debugfs.h>
86#include <linux/seq_file.h>
87#include <linux/cpumask.h>
88#include <linux/spinlock.h>
89#include <linux/module.h>
90#include <linux/mutex.h>
91#include <linux/rcupdate.h>
92#include <linux/stacktrace.h>
93#include <linux/cache.h>
94#include <linux/percpu.h>
95#include <linux/bootmem.h>
96#include <linux/pfn.h>
97#include <linux/mmzone.h>
98#include <linux/slab.h>
99#include <linux/thread_info.h>
100#include <linux/err.h>
101#include <linux/uaccess.h>
102#include <linux/string.h>
103#include <linux/nodemask.h>
104#include <linux/mm.h>
105#include <linux/workqueue.h>
106#include <linux/crc32.h>
107
108#include <asm/sections.h>
109#include <asm/processor.h>
110#include <linux/atomic.h>
111
112#include <linux/kasan.h>
113#include <linux/kmemleak.h>
114#include <linux/memory_hotplug.h>
115
116/*
117 * Kmemleak configuration and common defines.
118 */
119#define MAX_TRACE 16 /* stack trace length */
3c7b4e6b 120#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
3c7b4e6b
CM
121#define SECS_FIRST_SCAN 60 /* delay before the first scan */
122#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
af98603d 123#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
3c7b4e6b
CM
124
125#define BYTES_PER_POINTER sizeof(void *)
126
216c04b0 127/* GFP bitmask for kmemleak internal allocations */
20b5c303 128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
6ae4bd1f 129 __GFP_NORETRY | __GFP_NOMEMALLOC | \
d9570ee3 130 __GFP_NOWARN | __GFP_NOFAIL)
216c04b0 131
3c7b4e6b
CM
132/* scanning area inside a memory block */
133struct kmemleak_scan_area {
134 struct hlist_node node;
c017b4be
CM
135 unsigned long start;
136 size_t size;
3c7b4e6b
CM
137};
138
a1084c87
LR
139#define KMEMLEAK_GREY 0
140#define KMEMLEAK_BLACK -1
141
3c7b4e6b
CM
142/*
143 * Structure holding the metadata for each allocated memory block.
144 * Modifications to such objects should be made while holding the
145 * object->lock. Insertions or deletions from object_list, gray_list or
85d3a316 146 * rb_node are already protected by the corresponding locks or mutex (see
3c7b4e6b
CM
147 * the notes on locking above). These objects are reference-counted
148 * (use_count) and freed using the RCU mechanism.
149 */
150struct kmemleak_object {
151 spinlock_t lock;
f66abf09 152 unsigned int flags; /* object status flags */
3c7b4e6b
CM
153 struct list_head object_list;
154 struct list_head gray_list;
85d3a316 155 struct rb_node rb_node;
3c7b4e6b
CM
156 struct rcu_head rcu; /* object_list lockless traversal */
157 /* object usage count; object freed when use_count == 0 */
158 atomic_t use_count;
159 unsigned long pointer;
160 size_t size;
94f4a161
CM
161 /* pass surplus references to this pointer */
162 unsigned long excess_ref;
3c7b4e6b
CM
163 /* minimum number of pointers found before it is considered a leak */
164 int min_count;
165 /* the total number of pointers found pointing to this object */
166 int count;
04609ccc
CM
167 /* checksum for detecting modified objects */
168 u32 checksum;
3c7b4e6b
CM
169 /* memory ranges to be scanned inside an object (empty for all) */
170 struct hlist_head area_list;
171 unsigned long trace[MAX_TRACE];
172 unsigned int trace_len;
173 unsigned long jiffies; /* creation timestamp */
174 pid_t pid; /* pid of the current task */
175 char comm[TASK_COMM_LEN]; /* executable name */
176};
177
178/* flag representing the memory block allocation status */
179#define OBJECT_ALLOCATED (1 << 0)
180/* flag set after the first reporting of an unreferenced object */
181#define OBJECT_REPORTED (1 << 1)
182/* flag set to not scan the object */
183#define OBJECT_NO_SCAN (1 << 2)
184
154221c3 185#define HEX_PREFIX " "
0494e082
SS
186/* number of bytes to print per line; must be 16 or 32 */
187#define HEX_ROW_SIZE 16
188/* number of bytes to print at a time (1, 2, 4, 8) */
189#define HEX_GROUP_SIZE 1
190/* include ASCII after the hex output */
191#define HEX_ASCII 1
192/* max number of lines to be printed */
193#define HEX_MAX_LINES 2
194
3c7b4e6b
CM
195/* the list of all allocated objects */
196static LIST_HEAD(object_list);
197/* the list of gray-colored objects (see color_gray comment below) */
198static LIST_HEAD(gray_list);
85d3a316
ML
199/* search tree for object boundaries */
200static struct rb_root object_tree_root = RB_ROOT;
201/* rw_lock protecting the access to object_list and object_tree_root */
3c7b4e6b
CM
202static DEFINE_RWLOCK(kmemleak_lock);
203
204/* allocation caches for kmemleak internal data */
205static struct kmem_cache *object_cache;
206static struct kmem_cache *scan_area_cache;
207
208/* set if tracing memory operations is enabled */
8910ae89 209static int kmemleak_enabled;
c5f3b1a5
CM
210/* same as above but only for the kmemleak_free() callback */
211static int kmemleak_free_enabled;
3c7b4e6b 212/* set in the late_initcall if there were no errors */
8910ae89 213static int kmemleak_initialized;
3c7b4e6b 214/* enables or disables early logging of the memory operations */
8910ae89 215static int kmemleak_early_log = 1;
5f79020c 216/* set if a kmemleak warning was issued */
8910ae89 217static int kmemleak_warning;
5f79020c 218/* set if a fatal kmemleak error has occurred */
8910ae89 219static int kmemleak_error;
3c7b4e6b
CM
220
221/* minimum and maximum address that may be valid pointers */
222static unsigned long min_addr = ULONG_MAX;
223static unsigned long max_addr;
224
3c7b4e6b 225static struct task_struct *scan_thread;
acf4968e 226/* used to avoid reporting of recently allocated objects */
3c7b4e6b 227static unsigned long jiffies_min_age;
acf4968e 228static unsigned long jiffies_last_scan;
3c7b4e6b
CM
229/* delay between automatic memory scans */
230static signed long jiffies_scan_wait;
231/* enables or disables the task stacks scanning */
e0a2a160 232static int kmemleak_stack_scan = 1;
4698c1f2 233/* protects the memory scanning, parameters and debug/kmemleak file access */
3c7b4e6b 234static DEFINE_MUTEX(scan_mutex);
ab0155a2
JB
235/* setting kmemleak=on will set this var, skipping the disable */
236static int kmemleak_skip_disable;
dc9b3f42
LZ
237/* If there are leaks that can be reported */
238static bool kmemleak_found_leaks;
3c7b4e6b 239
154221c3
VW
240static bool kmemleak_verbose;
241module_param_named(verbose, kmemleak_verbose, bool, 0600);
242
3c7b4e6b 243/*
2030117d 244 * Early object allocation/freeing logging. Kmemleak is initialized after the
3c7b4e6b 245 * kernel allocator. However, both the kernel allocator and kmemleak may
2030117d 246 * allocate memory blocks which need to be tracked. Kmemleak defines an
3c7b4e6b
CM
247 * arbitrary buffer to hold the allocation/freeing information before it is
248 * fully initialized.
249 */
250
251/* kmemleak operation type for early logging */
252enum {
253 KMEMLEAK_ALLOC,
f528f0b8 254 KMEMLEAK_ALLOC_PERCPU,
3c7b4e6b 255 KMEMLEAK_FREE,
53238a60 256 KMEMLEAK_FREE_PART,
f528f0b8 257 KMEMLEAK_FREE_PERCPU,
3c7b4e6b
CM
258 KMEMLEAK_NOT_LEAK,
259 KMEMLEAK_IGNORE,
260 KMEMLEAK_SCAN_AREA,
94f4a161
CM
261 KMEMLEAK_NO_SCAN,
262 KMEMLEAK_SET_EXCESS_REF
3c7b4e6b
CM
263};
264
265/*
266 * Structure holding the information passed to kmemleak callbacks during the
267 * early logging.
268 */
269struct early_log {
270 int op_type; /* kmemleak operation type */
f66abf09 271 int min_count; /* minimum reference count */
3c7b4e6b 272 const void *ptr; /* allocated/freed memory block */
94f4a161
CM
273 union {
274 size_t size; /* memory block size */
275 unsigned long excess_ref; /* surplus reference passing */
276 };
fd678967
CM
277 unsigned long trace[MAX_TRACE]; /* stack trace */
278 unsigned int trace_len; /* stack trace length */
3c7b4e6b
CM
279};
280
281/* early logging buffer and current position */
a6186d89
CM
282static struct early_log
283 early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
284static int crt_early_log __initdata;
3c7b4e6b
CM
285
286static void kmemleak_disable(void);
287
288/*
289 * Print a warning and dump the stack trace.
290 */
5f79020c 291#define kmemleak_warn(x...) do { \
598d8091 292 pr_warn(x); \
5f79020c 293 dump_stack(); \
8910ae89 294 kmemleak_warning = 1; \
3c7b4e6b
CM
295} while (0)
296
297/*
298 * Macro invoked when a serious kmemleak condition has occurred and cannot be
299 * recovered from. Kmemleak will be disabled and further allocation/freeing
300 * tracing is no longer available.
301 */
000814f4 302#define kmemleak_stop(x...) do { \
3c7b4e6b
CM
303 kmemleak_warn(x); \
304 kmemleak_disable(); \
305} while (0)
306
154221c3
VW
307#define warn_or_seq_printf(seq, fmt, ...) do { \
308 if (seq) \
309 seq_printf(seq, fmt, ##__VA_ARGS__); \
310 else \
311 pr_warn(fmt, ##__VA_ARGS__); \
312} while (0)
313
314static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
315 int rowsize, int groupsize, const void *buf,
316 size_t len, bool ascii)
317{
318 if (seq)
319 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
320 buf, len, ascii);
321 else
322 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
323 rowsize, groupsize, buf, len, ascii);
324}
325
0494e082
SS
326/*
327 * Printing of the object's hex dump to the seq file. The number of lines to be
328 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
329 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
330 * with the object->lock held.
331 */
332static void hex_dump_object(struct seq_file *seq,
333 struct kmemleak_object *object)
334{
335 const u8 *ptr = (const u8 *)object->pointer;
6fc37c49 336 size_t len;
0494e082
SS
337
338 /* limit the number of lines to HEX_MAX_LINES */
6fc37c49 339 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
0494e082 340
154221c3 341 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
5c335fe0 342 kasan_disable_current();
154221c3
VW
343 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
344 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
5c335fe0 345 kasan_enable_current();
0494e082
SS
346}
347
3c7b4e6b
CM
348/*
349 * Object colors, encoded with count and min_count:
350 * - white - orphan object, not enough references to it (count < min_count)
351 * - gray - not orphan, not marked as false positive (min_count == 0) or
352 * sufficient references to it (count >= min_count)
353 * - black - ignore, it doesn't contain references (e.g. text section)
354 * (min_count == -1). No function defined for this color.
355 * Newly created objects don't have any color assigned (object->count == -1)
356 * before the next memory scan when they become white.
357 */
4a558dd6 358static bool color_white(const struct kmemleak_object *object)
3c7b4e6b 359{
a1084c87
LR
360 return object->count != KMEMLEAK_BLACK &&
361 object->count < object->min_count;
3c7b4e6b
CM
362}
363
4a558dd6 364static bool color_gray(const struct kmemleak_object *object)
3c7b4e6b 365{
a1084c87
LR
366 return object->min_count != KMEMLEAK_BLACK &&
367 object->count >= object->min_count;
3c7b4e6b
CM
368}
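/*
 * Worked example of the color encoding above: an object registered via
 * kmemleak_alloc() with min_count == 1 starts white (count < min_count) and
 * only turns gray once a scan finds at least one pointer to it. With
 * min_count == 0 it is gray from the start and never reported; with
 * min_count == KMEMLEAK_BLACK it is neither scanned nor reported.
 */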
369
3c7b4e6b
CM
370/*
371 * Objects are considered unreferenced only if their color is white, they have
372 * not been deleted and have a minimum age to avoid false positives caused by
373 * pointers temporarily stored in CPU registers.
374 */
4a558dd6 375static bool unreferenced_object(struct kmemleak_object *object)
3c7b4e6b 376{
04609ccc 377 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
acf4968e
CM
378 time_before_eq(object->jiffies + jiffies_min_age,
379 jiffies_last_scan);
3c7b4e6b
CM
380}
381
382/*
bab4a34a
CM
383 * Printing of the unreferenced object's information to the seq file. The
384 * print_unreferenced function must be called with the object->lock held.
3c7b4e6b 385 */
3c7b4e6b
CM
386static void print_unreferenced(struct seq_file *seq,
387 struct kmemleak_object *object)
388{
389 int i;
fefdd336 390 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
3c7b4e6b 391
154221c3 392 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
bab4a34a 393 object->pointer, object->size);
154221c3 394 warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
fefdd336
CM
395 object->comm, object->pid, object->jiffies,
396 msecs_age / 1000, msecs_age % 1000);
0494e082 397 hex_dump_object(seq, object);
154221c3 398 warn_or_seq_printf(seq, " backtrace:\n");
3c7b4e6b
CM
399
400 for (i = 0; i < object->trace_len; i++) {
401 void *ptr = (void *)object->trace[i];
154221c3 402 warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
3c7b4e6b
CM
403 }
404}
405
406/*
407 * Print the kmemleak_object information. This function is used mainly for
408 * debugging special cases of kmemleak operations. It must be called with
409 * the object->lock held.
410 */
411static void dump_object_info(struct kmemleak_object *object)
412{
413 struct stack_trace trace;
414
415 trace.nr_entries = object->trace_len;
416 trace.entries = object->trace;
417
ae281064 418 pr_notice("Object 0x%08lx (size %zu):\n",
85d3a316 419 object->pointer, object->size);
3c7b4e6b
CM
420 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
421 object->comm, object->pid, object->jiffies);
422 pr_notice(" min_count = %d\n", object->min_count);
423 pr_notice(" count = %d\n", object->count);
f66abf09 424 pr_notice(" flags = 0x%x\n", object->flags);
aae0ad7a 425 pr_notice(" checksum = %u\n", object->checksum);
3c7b4e6b
CM
426 pr_notice(" backtrace:\n");
427 print_stack_trace(&trace, 4);
428}
429
430/*
85d3a316 431 * Look-up a memory block metadata (kmemleak_object) in the object search
3c7b4e6b
CM
432 * tree based on a pointer value. If alias is 0, only values pointing to the
433 * beginning of the memory block are allowed. The kmemleak_lock must be held
434 * when calling this function.
435 */
436static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
437{
85d3a316
ML
438 struct rb_node *rb = object_tree_root.rb_node;
439
440 while (rb) {
441 struct kmemleak_object *object =
442 rb_entry(rb, struct kmemleak_object, rb_node);
443 if (ptr < object->pointer)
444 rb = object->rb_node.rb_left;
445 else if (object->pointer + object->size <= ptr)
446 rb = object->rb_node.rb_right;
447 else if (object->pointer == ptr || alias)
448 return object;
449 else {
5f79020c
CM
450 kmemleak_warn("Found object by alias at 0x%08lx\n",
451 ptr);
a7686a45 452 dump_object_info(object);
85d3a316 453 break;
3c7b4e6b 454 }
85d3a316
ML
455 }
456 return NULL;
3c7b4e6b
CM
457}
458
459/*
460 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
461 * that once an object's use_count has reached 0, the RCU freeing has already
462 * been registered and the object should no longer be used. This function must be
463 * called under the protection of rcu_read_lock().
464 */
465static int get_object(struct kmemleak_object *object)
466{
467 return atomic_inc_not_zero(&object->use_count);
468}
469
470/*
471 * RCU callback to free a kmemleak_object.
472 */
473static void free_object_rcu(struct rcu_head *rcu)
474{
b67bfe0d 475 struct hlist_node *tmp;
3c7b4e6b
CM
476 struct kmemleak_scan_area *area;
477 struct kmemleak_object *object =
478 container_of(rcu, struct kmemleak_object, rcu);
479
480 /*
481 * Once use_count is 0 (guaranteed by put_object), there is no other
482 * code accessing this object, hence no need for locking.
483 */
b67bfe0d
SL
484 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
485 hlist_del(&area->node);
3c7b4e6b
CM
486 kmem_cache_free(scan_area_cache, area);
487 }
488 kmem_cache_free(object_cache, object);
489}
490
491/*
492 * Decrement the object use_count. Once the count is 0, free the object using
493 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
494 * delete_object() path, the delayed RCU freeing ensures that there is no
495 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
496 * is also possible.
497 */
498static void put_object(struct kmemleak_object *object)
499{
500 if (!atomic_dec_and_test(&object->use_count))
501 return;
502
503 /* should only get here after delete_object was called */
504 WARN_ON(object->flags & OBJECT_ALLOCATED);
505
506 call_rcu(&object->rcu, free_object_rcu);
507}
508
509/*
85d3a316 510 * Look up an object in the object search tree and increase its use_count.
3c7b4e6b
CM
511 */
512static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
513{
514 unsigned long flags;
9fbed254 515 struct kmemleak_object *object;
3c7b4e6b
CM
516
517 rcu_read_lock();
518 read_lock_irqsave(&kmemleak_lock, flags);
93ada579 519 object = lookup_object(ptr, alias);
3c7b4e6b
CM
520 read_unlock_irqrestore(&kmemleak_lock, flags);
521
522 /* check whether the object is still available */
523 if (object && !get_object(object))
524 object = NULL;
525 rcu_read_unlock();
526
527 return object;
528}
529
e781a9ab
CM
530/*
531 * Look up an object in the object search tree and remove it from both
532 * object_tree_root and object_list. The returned object's use_count should be
533 * at least 1, as initially set by create_object().
534 */
535static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
536{
537 unsigned long flags;
538 struct kmemleak_object *object;
539
540 write_lock_irqsave(&kmemleak_lock, flags);
541 object = lookup_object(ptr, alias);
542 if (object) {
543 rb_erase(&object->rb_node, &object_tree_root);
544 list_del_rcu(&object->object_list);
545 }
546 write_unlock_irqrestore(&kmemleak_lock, flags);
547
548 return object;
549}
550
fd678967
CM
551/*
552 * Save stack trace to the given array of MAX_TRACE size.
553 */
554static int __save_stack_trace(unsigned long *trace)
555{
556 struct stack_trace stack_trace;
557
558 stack_trace.max_entries = MAX_TRACE;
559 stack_trace.nr_entries = 0;
560 stack_trace.entries = trace;
561 stack_trace.skip = 2;
562 save_stack_trace(&stack_trace);
563
564 return stack_trace.nr_entries;
565}
566
3c7b4e6b
CM
567/*
568 * Create the metadata (struct kmemleak_object) corresponding to an allocated
569 * memory block and add it to the object_list and object_tree_root.
570 */
fd678967
CM
571static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
572 int min_count, gfp_t gfp)
3c7b4e6b
CM
573{
574 unsigned long flags;
85d3a316
ML
575 struct kmemleak_object *object, *parent;
576 struct rb_node **link, *rb_parent;
3c7b4e6b 577
6ae4bd1f 578 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
3c7b4e6b 579 if (!object) {
598d8091 580 pr_warn("Cannot allocate a kmemleak_object structure\n");
6ae4bd1f 581 kmemleak_disable();
fd678967 582 return NULL;
3c7b4e6b
CM
583 }
584
585 INIT_LIST_HEAD(&object->object_list);
586 INIT_LIST_HEAD(&object->gray_list);
587 INIT_HLIST_HEAD(&object->area_list);
588 spin_lock_init(&object->lock);
589 atomic_set(&object->use_count, 1);
04609ccc 590 object->flags = OBJECT_ALLOCATED;
3c7b4e6b
CM
591 object->pointer = ptr;
592 object->size = size;
94f4a161 593 object->excess_ref = 0;
3c7b4e6b 594 object->min_count = min_count;
04609ccc 595 object->count = 0; /* white color initially */
3c7b4e6b 596 object->jiffies = jiffies;
04609ccc 597 object->checksum = 0;
3c7b4e6b
CM
598
599 /* task information */
600 if (in_irq()) {
601 object->pid = 0;
602 strncpy(object->comm, "hardirq", sizeof(object->comm));
603 } else if (in_softirq()) {
604 object->pid = 0;
605 strncpy(object->comm, "softirq", sizeof(object->comm));
606 } else {
607 object->pid = current->pid;
608 /*
609 * There is a small chance of a race with set_task_comm(),
610 * however using get_task_comm() here may cause locking
611 * dependency issues with current->alloc_lock. In the worst
612 * case, the command line is not correct.
613 */
614 strncpy(object->comm, current->comm, sizeof(object->comm));
615 }
616
617 /* kernel backtrace */
fd678967 618 object->trace_len = __save_stack_trace(object->trace);
3c7b4e6b 619
3c7b4e6b 620 write_lock_irqsave(&kmemleak_lock, flags);
0580a181 621
3c7b4e6b
CM
622 min_addr = min(min_addr, ptr);
623 max_addr = max(max_addr, ptr + size);
85d3a316
ML
624 link = &object_tree_root.rb_node;
625 rb_parent = NULL;
626 while (*link) {
627 rb_parent = *link;
628 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
629 if (ptr + size <= parent->pointer)
630 link = &parent->rb_node.rb_left;
631 else if (parent->pointer + parent->size <= ptr)
632 link = &parent->rb_node.rb_right;
633 else {
756a025f 634 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
85d3a316 635 ptr);
9d5a4c73
CM
636 /*
637 * No need for parent->lock here since "parent" cannot
638 * be freed while the kmemleak_lock is held.
639 */
640 dump_object_info(parent);
85d3a316 641 kmem_cache_free(object_cache, object);
9d5a4c73 642 object = NULL;
85d3a316
ML
643 goto out;
644 }
3c7b4e6b 645 }
85d3a316
ML
646 rb_link_node(&object->rb_node, rb_parent, link);
647 rb_insert_color(&object->rb_node, &object_tree_root);
648
3c7b4e6b
CM
649 list_add_tail_rcu(&object->object_list, &object_list);
650out:
651 write_unlock_irqrestore(&kmemleak_lock, flags);
fd678967 652 return object;
3c7b4e6b
CM
653}
654
655/*
e781a9ab 656 * Mark the object as not allocated and schedule RCU freeing via put_object().
3c7b4e6b 657 */
53238a60 658static void __delete_object(struct kmemleak_object *object)
3c7b4e6b
CM
659{
660 unsigned long flags;
3c7b4e6b 661
3c7b4e6b 662 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
e781a9ab 663 WARN_ON(atomic_read(&object->use_count) < 1);
3c7b4e6b
CM
664
665 /*
666 * Locking here also ensures that the corresponding memory block
667 * cannot be freed when it is being scanned.
668 */
669 spin_lock_irqsave(&object->lock, flags);
3c7b4e6b
CM
670 object->flags &= ~OBJECT_ALLOCATED;
671 spin_unlock_irqrestore(&object->lock, flags);
672 put_object(object);
673}
674
53238a60
CM
675/*
676 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
677 * delete it.
678 */
679static void delete_object_full(unsigned long ptr)
680{
681 struct kmemleak_object *object;
682
e781a9ab 683 object = find_and_remove_object(ptr, 0);
53238a60
CM
684 if (!object) {
685#ifdef DEBUG
686 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
687 ptr);
688#endif
689 return;
690 }
691 __delete_object(object);
53238a60
CM
692}
693
694/*
695 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
696 * delete it. If the memory block is partially freed, the function may create
697 * additional metadata for the remaining parts of the block.
698 */
699static void delete_object_part(unsigned long ptr, size_t size)
700{
701 struct kmemleak_object *object;
702 unsigned long start, end;
703
e781a9ab 704 object = find_and_remove_object(ptr, 1);
53238a60
CM
705 if (!object) {
706#ifdef DEBUG
756a025f
JP
707 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
708 ptr, size);
53238a60
CM
709#endif
710 return;
711 }
53238a60
CM
712
713 /*
714 * Create one or two objects that may result from the memory block
715 * split. Note that partial freeing is only done by free_bootmem() and
716 * this happens before kmemleak_init() is called. The path below is
717 * only executed during early log recording in kmemleak_init(), so
718 * GFP_KERNEL is enough.
719 */
720 start = object->pointer;
721 end = object->pointer + object->size;
722 if (ptr > start)
723 create_object(start, ptr - start, object->min_count,
724 GFP_KERNEL);
725 if (ptr + size < end)
726 create_object(ptr + size, end - ptr - size, object->min_count,
727 GFP_KERNEL);
728
e781a9ab 729 __delete_object(object);
53238a60 730}
a1084c87
LR
731
732static void __paint_it(struct kmemleak_object *object, int color)
733{
734 object->min_count = color;
735 if (color == KMEMLEAK_BLACK)
736 object->flags |= OBJECT_NO_SCAN;
737}
738
739static void paint_it(struct kmemleak_object *object, int color)
3c7b4e6b
CM
740{
741 unsigned long flags;
a1084c87
LR
742
743 spin_lock_irqsave(&object->lock, flags);
744 __paint_it(object, color);
745 spin_unlock_irqrestore(&object->lock, flags);
746}
747
748static void paint_ptr(unsigned long ptr, int color)
749{
3c7b4e6b
CM
750 struct kmemleak_object *object;
751
752 object = find_and_get_object(ptr, 0);
753 if (!object) {
756a025f
JP
754 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
755 ptr,
a1084c87
LR
756 (color == KMEMLEAK_GREY) ? "Grey" :
757 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
3c7b4e6b
CM
758 return;
759 }
a1084c87 760 paint_it(object, color);
3c7b4e6b
CM
761 put_object(object);
762}
763
a1084c87 764/*
145b64b9 765 * Mark an object permanently as gray-colored so that it can no longer be
a1084c87
LR
766 * reported as a leak. This is used in general to mark a false positive.
767 */
768static void make_gray_object(unsigned long ptr)
769{
770 paint_ptr(ptr, KMEMLEAK_GREY);
771}
772
3c7b4e6b
CM
773/*
774 * Mark the object as black-colored so that it is ignored from scans and
775 * reporting.
776 */
777static void make_black_object(unsigned long ptr)
778{
a1084c87 779 paint_ptr(ptr, KMEMLEAK_BLACK);
3c7b4e6b
CM
780}
781
782/*
783 * Add a scanning area to the object. If at least one such area is added,
784 * kmemleak will only scan these ranges rather than the whole memory block.
785 */
c017b4be 786static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
3c7b4e6b
CM
787{
788 unsigned long flags;
789 struct kmemleak_object *object;
790 struct kmemleak_scan_area *area;
791
c017b4be 792 object = find_and_get_object(ptr, 1);
3c7b4e6b 793 if (!object) {
ae281064
JP
794 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
795 ptr);
3c7b4e6b
CM
796 return;
797 }
798
6ae4bd1f 799 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
3c7b4e6b 800 if (!area) {
598d8091 801 pr_warn("Cannot allocate a scan area\n");
3c7b4e6b
CM
802 goto out;
803 }
804
805 spin_lock_irqsave(&object->lock, flags);
7f88f88f
CM
806 if (size == SIZE_MAX) {
807 size = object->pointer + object->size - ptr;
808 } else if (ptr + size > object->pointer + object->size) {
ae281064 809 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
3c7b4e6b
CM
810 dump_object_info(object);
811 kmem_cache_free(scan_area_cache, area);
812 goto out_unlock;
813 }
814
815 INIT_HLIST_NODE(&area->node);
c017b4be
CM
816 area->start = ptr;
817 area->size = size;
3c7b4e6b
CM
818
819 hlist_add_head(&area->node, &object->area_list);
820out_unlock:
821 spin_unlock_irqrestore(&object->lock, flags);
822out:
823 put_object(object);
824}
825
94f4a161
CM
826/*
827 * Any surplus references (object already gray) to 'ptr' are passed to
828 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
829 * vm_struct may be used as an alternative reference to the vmalloc'ed object
830 * (see free_thread_stack()).
831 */
832static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
833{
834 unsigned long flags;
835 struct kmemleak_object *object;
836
837 object = find_and_get_object(ptr, 0);
838 if (!object) {
839 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
840 ptr);
841 return;
842 }
843
844 spin_lock_irqsave(&object->lock, flags);
845 object->excess_ref = excess_ref;
846 spin_unlock_irqrestore(&object->lock, flags);
847 put_object(object);
848}
849
3c7b4e6b
CM
850/*
851 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
852 * pointer. Such an object will not be scanned by kmemleak but references to it
853 * are searched.
854 */
855static void object_no_scan(unsigned long ptr)
856{
857 unsigned long flags;
858 struct kmemleak_object *object;
859
860 object = find_and_get_object(ptr, 0);
861 if (!object) {
ae281064 862 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
3c7b4e6b
CM
863 return;
864 }
865
866 spin_lock_irqsave(&object->lock, flags);
867 object->flags |= OBJECT_NO_SCAN;
868 spin_unlock_irqrestore(&object->lock, flags);
869 put_object(object);
870}
871
872/*
873 * Log an early kmemleak_* call to the early_log buffer. These calls will be
874 * processed later once kmemleak is fully initialized.
875 */
a6186d89 876static void __init log_early(int op_type, const void *ptr, size_t size,
c017b4be 877 int min_count)
3c7b4e6b
CM
878{
879 unsigned long flags;
880 struct early_log *log;
881
8910ae89 882 if (kmemleak_error) {
b6693005
CM
883 /* kmemleak stopped recording, just count the requests */
884 crt_early_log++;
885 return;
886 }
887
3c7b4e6b 888 if (crt_early_log >= ARRAY_SIZE(early_log)) {
21cd3a60 889 crt_early_log++;
a9d9058a 890 kmemleak_disable();
3c7b4e6b
CM
891 return;
892 }
893
894 /*
895 * There is no need for locking since the kernel is still in UP mode
896 * at this stage. Disabling the IRQs is enough.
897 */
898 local_irq_save(flags);
899 log = &early_log[crt_early_log];
900 log->op_type = op_type;
901 log->ptr = ptr;
902 log->size = size;
903 log->min_count = min_count;
5f79020c 904 log->trace_len = __save_stack_trace(log->trace);
3c7b4e6b
CM
905 crt_early_log++;
906 local_irq_restore(flags);
907}
908
fd678967
CM
909/*
910 * Log an early allocated block and populate the stack trace.
911 */
912static void early_alloc(struct early_log *log)
913{
914 struct kmemleak_object *object;
915 unsigned long flags;
916 int i;
917
8910ae89 918 if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
fd678967
CM
919 return;
920
921 /*
922 * RCU locking needed to ensure object is not freed via put_object().
923 */
924 rcu_read_lock();
925 object = create_object((unsigned long)log->ptr, log->size,
c1bcd6b3 926 log->min_count, GFP_ATOMIC);
0d5d1aad
CM
927 if (!object)
928 goto out;
fd678967
CM
929 spin_lock_irqsave(&object->lock, flags);
930 for (i = 0; i < log->trace_len; i++)
931 object->trace[i] = log->trace[i];
932 object->trace_len = log->trace_len;
933 spin_unlock_irqrestore(&object->lock, flags);
0d5d1aad 934out:
fd678967
CM
935 rcu_read_unlock();
936}
937
f528f0b8
CM
938/*
939 * Log an early allocated percpu block and populate the stack trace for each CPU.
940 */
941static void early_alloc_percpu(struct early_log *log)
942{
943 unsigned int cpu;
944 const void __percpu *ptr = log->ptr;
945
946 for_each_possible_cpu(cpu) {
947 log->ptr = per_cpu_ptr(ptr, cpu);
948 early_alloc(log);
949 }
950}
951
a2b6bf63
CM
952/**
953 * kmemleak_alloc - register a newly allocated object
954 * @ptr: pointer to beginning of the object
955 * @size: size of the object
956 * @min_count: minimum number of references to this object. If during memory
957 * scanning a number of references less than @min_count is found,
958 * the object is reported as a memory leak. If @min_count is 0,
959 * the object is never reported as a leak. If @min_count is -1,
960 * the object is ignored (not scanned and not reported as a leak)
961 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
962 *
963 * This function is called from the kernel allocators when a new object
94f4a161 964 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
3c7b4e6b 965 */
a6186d89
CM
966void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
967 gfp_t gfp)
3c7b4e6b
CM
968{
969 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
970
8910ae89 971 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
3c7b4e6b 972 create_object((unsigned long)ptr, size, min_count, gfp);
8910ae89 973 else if (kmemleak_early_log)
c017b4be 974 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
3c7b4e6b
CM
975}
976EXPORT_SYMBOL_GPL(kmemleak_alloc);
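/*
 * Usage sketch (hypothetical caller, names invented for illustration): a
 * custom allocator whose blocks are not covered by the slab hooks can
 * register them explicitly so that leaks are reported for them too:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *ptr = my_pool_take(size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *		return ptr;
 *	}
 *
 * The matching free path would call kmemleak_free(ptr) before the block is
 * returned to the pool.
 */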
977
f528f0b8
CM
978/**
979 * kmemleak_alloc_percpu - register a newly allocated __percpu object
980 * @ptr: __percpu pointer to beginning of the object
981 * @size: size of the object
8a8c35fa 982 * @gfp: flags used for kmemleak internal memory allocations
f528f0b8
CM
983 *
984 * This function is called from the kernel percpu allocator when a new object
8a8c35fa 985 * (memory block) is allocated (alloc_percpu).
f528f0b8 986 */
8a8c35fa
LF
987void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
988 gfp_t gfp)
f528f0b8
CM
989{
990 unsigned int cpu;
991
992 pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
993
994 /*
995 * Percpu allocations are only scanned and not reported as leaks
996 * (min_count is set to 0).
997 */
8910ae89 998 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
f528f0b8
CM
999 for_each_possible_cpu(cpu)
1000 create_object((unsigned long)per_cpu_ptr(ptr, cpu),
8a8c35fa 1001 size, 0, gfp);
8910ae89 1002 else if (kmemleak_early_log)
f528f0b8
CM
1003 log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
1004}
1005EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1006
94f4a161
CM
1007/**
1008 * kmemleak_vmalloc - register a newly vmalloc'ed object
1009 * @area: pointer to vm_struct
1010 * @size: size of the object
1011 * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
1012 *
1013 * This function is called from the vmalloc() kernel allocator when a new
1014 * object (memory block) is allocated.
1015 */
1016void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1017{
1018 pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
1019
1020 /*
1021 * A min_count = 2 is needed because vm_struct contains a reference to
1022 * the virtual address of the vmalloc'ed block.
1023 */
1024 if (kmemleak_enabled) {
1025 create_object((unsigned long)area->addr, size, 2, gfp);
1026 object_set_excess_ref((unsigned long)area,
1027 (unsigned long)area->addr);
1028 } else if (kmemleak_early_log) {
1029 log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
1030 /* reusing early_log.size for storing area->addr */
1031 log_early(KMEMLEAK_SET_EXCESS_REF,
1032 area, (unsigned long)area->addr, 0);
1033 }
1034}
1035EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1036
a2b6bf63
CM
1037/**
1038 * kmemleak_free - unregister a previously registered object
1039 * @ptr: pointer to beginning of the object
1040 *
1041 * This function is called from the kernel allocators when an object (memory
1042 * block) is freed (kmem_cache_free, kfree, vfree etc.).
3c7b4e6b 1043 */
a6186d89 1044void __ref kmemleak_free(const void *ptr)
3c7b4e6b
CM
1045{
1046 pr_debug("%s(0x%p)\n", __func__, ptr);
1047
c5f3b1a5 1048 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
53238a60 1049 delete_object_full((unsigned long)ptr);
8910ae89 1050 else if (kmemleak_early_log)
c017b4be 1051 log_early(KMEMLEAK_FREE, ptr, 0, 0);
3c7b4e6b
CM
1052}
1053EXPORT_SYMBOL_GPL(kmemleak_free);
1054
a2b6bf63
CM
1055/**
1056 * kmemleak_free_part - partially unregister a previously registered object
1057 * @ptr: pointer to the beginning or inside the object. This also
1058 * represents the start of the range to be freed
1059 * @size: size to be unregistered
1060 *
1061 * This function is called when only a part of a memory block is freed
1062 * (usually from the bootmem allocator).
53238a60 1063 */
a6186d89 1064void __ref kmemleak_free_part(const void *ptr, size_t size)
53238a60
CM
1065{
1066 pr_debug("%s(0x%p)\n", __func__, ptr);
1067
8910ae89 1068 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
53238a60 1069 delete_object_part((unsigned long)ptr, size);
8910ae89 1070 else if (kmemleak_early_log)
c017b4be 1071 log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
53238a60
CM
1072}
1073EXPORT_SYMBOL_GPL(kmemleak_free_part);
1074
f528f0b8
CM
1075/**
1076 * kmemleak_free_percpu - unregister a previously registered __percpu object
1077 * @ptr: __percpu pointer to beginning of the object
1078 *
1079 * This function is called from the kernel percpu allocator when an object
1080 * (memory block) is freed (free_percpu).
1081 */
1082void __ref kmemleak_free_percpu(const void __percpu *ptr)
1083{
1084 unsigned int cpu;
1085
1086 pr_debug("%s(0x%p)\n", __func__, ptr);
1087
c5f3b1a5 1088 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
f528f0b8
CM
1089 for_each_possible_cpu(cpu)
1090 delete_object_full((unsigned long)per_cpu_ptr(ptr,
1091 cpu));
8910ae89 1092 else if (kmemleak_early_log)
f528f0b8
CM
1093 log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
1094}
1095EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1096
ffe2c748
CM
1097/**
1098 * kmemleak_update_trace - update object allocation stack trace
1099 * @ptr: pointer to beginning of the object
1100 *
1101 * Override the object allocation stack trace for cases where the actual
1102 * allocation place is not always useful.
1103 */
1104void __ref kmemleak_update_trace(const void *ptr)
1105{
1106 struct kmemleak_object *object;
1107 unsigned long flags;
1108
1109 pr_debug("%s(0x%p)\n", __func__, ptr);
1110
1111 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1112 return;
1113
1114 object = find_and_get_object((unsigned long)ptr, 1);
1115 if (!object) {
1116#ifdef DEBUG
1117 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1118 ptr);
1119#endif
1120 return;
1121 }
1122
1123 spin_lock_irqsave(&object->lock, flags);
1124 object->trace_len = __save_stack_trace(object->trace);
1125 spin_unlock_irqrestore(&object->lock, flags);
1126
1127 put_object(object);
1128}
1129EXPORT_SYMBOL(kmemleak_update_trace);
1130
a2b6bf63
CM
1131/**
1132 * kmemleak_not_leak - mark an allocated object as false positive
1133 * @ptr: pointer to beginning of the object
1134 *
1135 * Calling this function on an object will cause the memory block to no longer
1136 * be reported as a leak and always be scanned.
3c7b4e6b 1137 */
a6186d89 1138void __ref kmemleak_not_leak(const void *ptr)
3c7b4e6b
CM
1139{
1140 pr_debug("%s(0x%p)\n", __func__, ptr);
1141
8910ae89 1142 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
3c7b4e6b 1143 make_gray_object((unsigned long)ptr);
8910ae89 1144 else if (kmemleak_early_log)
c017b4be 1145 log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
3c7b4e6b
CM
1146}
1147EXPORT_SYMBOL(kmemleak_not_leak);
1148
a2b6bf63
CM
1149/**
1150 * kmemleak_ignore - ignore an allocated object
1151 * @ptr: pointer to beginning of the object
1152 *
1153 * Calling this function on an object will cause the memory block to be
1154 * ignored (not scanned and not reported as a leak). This is usually done when
1155 * it is known that the corresponding block is not a leak and does not contain
1156 * any references to other allocated memory blocks.
3c7b4e6b 1157 */
a6186d89 1158void __ref kmemleak_ignore(const void *ptr)
3c7b4e6b
CM
1159{
1160 pr_debug("%s(0x%p)\n", __func__, ptr);
1161
8910ae89 1162 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
3c7b4e6b 1163 make_black_object((unsigned long)ptr);
8910ae89 1164 else if (kmemleak_early_log)
c017b4be 1165 log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
3c7b4e6b
CM
1166}
1167EXPORT_SYMBOL(kmemleak_ignore);
1168
a2b6bf63
CM
1169/**
1170 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1171 * @ptr: pointer to beginning or inside the object. This also
1172 * represents the start of the scan area
1173 * @size: size of the scan area
1174 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1175 *
1176 * This function is used when it is known that only certain parts of an object
1177 * contain references to other objects. Kmemleak will only scan these areas
1178 * reducing the number of false negatives.
3c7b4e6b 1179 */
c017b4be 1180void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
3c7b4e6b
CM
1181{
1182 pr_debug("%s(0x%p)\n", __func__, ptr);
1183
8910ae89 1184 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
c017b4be 1185 add_scan_area((unsigned long)ptr, size, gfp);
8910ae89 1186 else if (kmemleak_early_log)
c017b4be 1187 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
3c7b4e6b
CM
1188}
1189EXPORT_SYMBOL(kmemleak_scan_area);
1190
a2b6bf63
CM
1191/**
1192 * kmemleak_no_scan - do not scan an allocated object
1193 * @ptr: pointer to beginning of the object
1194 *
1195 * This function notifies kmemleak not to scan the given memory block. Useful
1196 * in situations where it is known that the given object does not contain any
1197 * references to other objects. Kmemleak will not scan such objects reducing
1198 * the number of false negatives.
3c7b4e6b 1199 */
a6186d89 1200void __ref kmemleak_no_scan(const void *ptr)
3c7b4e6b
CM
1201{
1202 pr_debug("%s(0x%p)\n", __func__, ptr);
1203
8910ae89 1204 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
3c7b4e6b 1205 object_no_scan((unsigned long)ptr);
8910ae89 1206 else if (kmemleak_early_log)
c017b4be 1207 log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
3c7b4e6b
CM
1208}
1209EXPORT_SYMBOL(kmemleak_no_scan);
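/*
 * Annotation sketch (illustrative only, hypothetical object/field names).
 * Typical ways callers combine the hints defined above:
 *
 *	kmemleak_not_leak(obj);    - known false positive, e.g. the only
 *	                             reference is kept in a hardware register
 *	kmemleak_no_scan(dma_buf); - the block holds no kernel pointers, so
 *	                             scanning it would only cost time
 *	kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);
 *	                           - only this part of the object can contain
 *	                             references to other tracked objects
 */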
1210
9099daed
CM
1211/**
1212 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1213 * address argument
e8b098fc
MR
1214 * @phys: physical address of the object
1215 * @size: size of the object
1216 * @min_count: minimum number of references to this object.
1217 * See kmemleak_alloc()
1218 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
9099daed
CM
1219 */
1220void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1221 gfp_t gfp)
1222{
1223 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1224 kmemleak_alloc(__va(phys), size, min_count, gfp);
1225}
1226EXPORT_SYMBOL(kmemleak_alloc_phys);
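/*
 * Example (assumption: this mirrors how early/boot-time allocators use the
 * _phys variants; my_early_alloc() is a made-up name): when only a physical
 * address is available, the block can still be registered and kmemleak maps
 * it through __va() as long as it is lowmem:
 *
 *	phys_addr_t phys = my_early_alloc(size);
 *
 *	if (phys)
 *		kmemleak_alloc_phys(phys, size, 0, 0);
 *
 * min_count == 0 means the block is scanned for references but never itself
 * reported as a leak.
 */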
1227
1228/**
1229 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1230 * physical address argument
e8b098fc
MR
1231 * @phys: physical address of the beginning or inside an object. This
1232 * also represents the start of the range to be freed
1233 * @size: size to be unregistered
9099daed
CM
1234 */
1235void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1236{
1237 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1238 kmemleak_free_part(__va(phys), size);
1239}
1240EXPORT_SYMBOL(kmemleak_free_part_phys);
1241
1242/**
1243 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1244 * address argument
e8b098fc 1245 * @phys: physical address of the object
9099daed
CM
1246 */
1247void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1248{
1249 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1250 kmemleak_not_leak(__va(phys));
1251}
1252EXPORT_SYMBOL(kmemleak_not_leak_phys);
1253
1254/**
1255 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1256 * address argument
e8b098fc 1257 * @phys: physical address of the object
9099daed
CM
1258 */
1259void __ref kmemleak_ignore_phys(phys_addr_t phys)
1260{
1261 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1262 kmemleak_ignore(__va(phys));
1263}
1264EXPORT_SYMBOL(kmemleak_ignore_phys);
1265
04609ccc
CM
1266/*
1267 * Update an object's checksum and return true if it was modified.
1268 */
1269static bool update_checksum(struct kmemleak_object *object)
1270{
1271 u32 old_csum = object->checksum;
1272
e79ed2f1 1273 kasan_disable_current();
04609ccc 1274 object->checksum = crc32(0, (void *)object->pointer, object->size);
e79ed2f1
AR
1275 kasan_enable_current();
1276
04609ccc
CM
1277 return object->checksum != old_csum;
1278}
1279
04f70d13
CM
1280/*
1281 * Update an object's references. object->lock must be held by the caller.
1282 */
1283static void update_refs(struct kmemleak_object *object)
1284{
1285 if (!color_white(object)) {
1286 /* non-orphan, ignored or new */
1287 return;
1288 }
1289
1290 /*
1291 * Increase the object's reference count (number of pointers to the
1292 * memory block). If this count reaches the required minimum, the
1293 * object's color will become gray and it will be added to the
1294 * gray_list.
1295 */
1296 object->count++;
1297 if (color_gray(object)) {
1298 /* put_object() called when removing from gray_list */
1299 WARN_ON(!get_object(object));
1300 list_add_tail(&object->gray_list, &gray_list);
1301 }
1302}
1303
3c7b4e6b
CM
1304/*
1305 * Memory scanning is a long process and it needs to be interruptible. This
1306 * function checks whether such an interrupt condition has occurred.
1307 */
1308static int scan_should_stop(void)
1309{
8910ae89 1310 if (!kmemleak_enabled)
3c7b4e6b
CM
1311 return 1;
1312
1313 /*
1314 * This function may be called from either process or kthread context,
1315 * hence the need to check for both stop conditions.
1316 */
1317 if (current->mm)
1318 return signal_pending(current);
1319 else
1320 return kthread_should_stop();
1321
1322 return 0;
1323}
1324
1325/*
1326 * Scan a memory block (exclusive range) for valid pointers and add those
1327 * found to the gray list.
1328 */
1329static void scan_block(void *_start, void *_end,
93ada579 1330 struct kmemleak_object *scanned)
3c7b4e6b
CM
1331{
1332 unsigned long *ptr;
1333 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1334 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
93ada579 1335 unsigned long flags;
3c7b4e6b 1336
93ada579 1337 read_lock_irqsave(&kmemleak_lock, flags);
3c7b4e6b 1338 for (ptr = start; ptr < end; ptr++) {
3c7b4e6b 1339 struct kmemleak_object *object;
8e019366 1340 unsigned long pointer;
94f4a161 1341 unsigned long excess_ref;
3c7b4e6b
CM
1342
1343 if (scan_should_stop())
1344 break;
1345
e79ed2f1 1346 kasan_disable_current();
8e019366 1347 pointer = *ptr;
e79ed2f1 1348 kasan_enable_current();
8e019366 1349
93ada579
CM
1350 if (pointer < min_addr || pointer >= max_addr)
1351 continue;
1352
1353 /*
1354 * No need for get_object() here since we hold kmemleak_lock.
1355 * object->use_count cannot be dropped to 0 while the object
1356 * is still present in object_tree_root and object_list
1357 * (with updates protected by kmemleak_lock).
1358 */
1359 object = lookup_object(pointer, 1);
3c7b4e6b
CM
1360 if (!object)
1361 continue;
93ada579 1362 if (object == scanned)
3c7b4e6b 1363 /* self referenced, ignore */
3c7b4e6b 1364 continue;
3c7b4e6b
CM
1365
1366 /*
1367 * Avoid the lockdep recursive warning on object->lock being
1368 * previously acquired in scan_object(). These locks are
1369 * enclosed by scan_mutex.
1370 */
93ada579 1371 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
94f4a161
CM
1372 /* only pass surplus references (object already gray) */
1373 if (color_gray(object)) {
1374 excess_ref = object->excess_ref;
1375 /* no need for update_refs() if object already gray */
1376 } else {
1377 excess_ref = 0;
1378 update_refs(object);
1379 }
93ada579 1380 spin_unlock(&object->lock);
94f4a161
CM
1381
1382 if (excess_ref) {
1383 object = lookup_object(excess_ref, 0);
1384 if (!object)
1385 continue;
1386 if (object == scanned)
1387 /* circular reference, ignore */
1388 continue;
1389 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1390 update_refs(object);
1391 spin_unlock(&object->lock);
1392 }
93ada579
CM
1393 }
1394 read_unlock_irqrestore(&kmemleak_lock, flags);
1395}
0587da40 1396
93ada579
CM
1397/*
1398 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1399 */
1400static void scan_large_block(void *start, void *end)
1401{
1402 void *next;
1403
1404 while (start < end) {
1405 next = min(start + MAX_SCAN_SIZE, end);
1406 scan_block(start, next, NULL);
1407 start = next;
1408 cond_resched();
3c7b4e6b
CM
1409 }
1410}
1411
1412/*
1413 * Scan a memory block corresponding to a kmemleak_object. A condition is
1414 * that object->use_count >= 1.
1415 */
1416static void scan_object(struct kmemleak_object *object)
1417{
1418 struct kmemleak_scan_area *area;
3c7b4e6b
CM
1419 unsigned long flags;
1420
1421 /*
21ae2956
UKK
1422 * Once the object->lock is acquired, the corresponding memory block
1423 * cannot be freed (the same lock is acquired in delete_object).
3c7b4e6b
CM
1424 */
1425 spin_lock_irqsave(&object->lock, flags);
1426 if (object->flags & OBJECT_NO_SCAN)
1427 goto out;
1428 if (!(object->flags & OBJECT_ALLOCATED))
1429 /* already freed object */
1430 goto out;
af98603d
CM
1431 if (hlist_empty(&object->area_list)) {
1432 void *start = (void *)object->pointer;
1433 void *end = (void *)(object->pointer + object->size);
93ada579
CM
1434 void *next;
1435
1436 do {
1437 next = min(start + MAX_SCAN_SIZE, end);
1438 scan_block(start, next, object);
af98603d 1439
93ada579
CM
1440 start = next;
1441 if (start >= end)
1442 break;
af98603d
CM
1443
1444 spin_unlock_irqrestore(&object->lock, flags);
1445 cond_resched();
1446 spin_lock_irqsave(&object->lock, flags);
93ada579 1447 } while (object->flags & OBJECT_ALLOCATED);
af98603d 1448 } else
b67bfe0d 1449 hlist_for_each_entry(area, &object->area_list, node)
c017b4be
CM
1450 scan_block((void *)area->start,
1451 (void *)(area->start + area->size),
93ada579 1452 object);
3c7b4e6b
CM
1453out:
1454 spin_unlock_irqrestore(&object->lock, flags);
1455}
1456
04609ccc
CM
1457/*
1458 * Scan the objects already referenced (gray objects). More objects will be
1459 * referenced and, if there are no memory leaks, all the objects are scanned.
1460 */
1461static void scan_gray_list(void)
1462{
1463 struct kmemleak_object *object, *tmp;
1464
1465 /*
1466 * The list traversal is safe for both tail additions and removals
1467 * from inside the loop. The kmemleak objects cannot be freed from
1468 * outside the loop because their use_count was incremented.
1469 */
1470 object = list_entry(gray_list.next, typeof(*object), gray_list);
1471 while (&object->gray_list != &gray_list) {
1472 cond_resched();
1473
1474 /* may add new objects to the list */
1475 if (!scan_should_stop())
1476 scan_object(object);
1477
1478 tmp = list_entry(object->gray_list.next, typeof(*object),
1479 gray_list);
1480
1481 /* remove the object from the list and release it */
1482 list_del(&object->gray_list);
1483 put_object(object);
1484
1485 object = tmp;
1486 }
1487 WARN_ON(!list_empty(&gray_list));
1488}
1489
3c7b4e6b
CM
1490/*
1491 * Scan data sections and all the referenced memory blocks allocated via the
1492 * kernel's standard allocators. This function must be called with the
1493 * scan_mutex held.
1494 */
1495static void kmemleak_scan(void)
1496{
1497 unsigned long flags;
04609ccc 1498 struct kmemleak_object *object;
3c7b4e6b 1499 int i;
4698c1f2 1500 int new_leaks = 0;
3c7b4e6b 1501
acf4968e
CM
1502 jiffies_last_scan = jiffies;
1503
3c7b4e6b
CM
1504 /* prepare the kmemleak_object's */
1505 rcu_read_lock();
1506 list_for_each_entry_rcu(object, &object_list, object_list) {
1507 spin_lock_irqsave(&object->lock, flags);
1508#ifdef DEBUG
1509 /*
1510 * With a few exceptions there should be a maximum of
1511 * 1 reference to any object at this point.
1512 */
1513 if (atomic_read(&object->use_count) > 1) {
ae281064 1514 pr_debug("object->use_count = %d\n",
3c7b4e6b
CM
1515 atomic_read(&object->use_count));
1516 dump_object_info(object);
1517 }
1518#endif
1519 /* reset the reference count (whiten the object) */
1520 object->count = 0;
1521 if (color_gray(object) && get_object(object))
1522 list_add_tail(&object->gray_list, &gray_list);
1523
1524 spin_unlock_irqrestore(&object->lock, flags);
1525 }
1526 rcu_read_unlock();
1527
1528 /* data/bss scanning */
93ada579
CM
1529 scan_large_block(_sdata, _edata);
1530 scan_large_block(__bss_start, __bss_stop);
906f2a51 1531 scan_large_block(__start_ro_after_init, __end_ro_after_init);
3c7b4e6b
CM
1532
1533#ifdef CONFIG_SMP
1534 /* per-cpu sections scanning */
1535 for_each_possible_cpu(i)
93ada579
CM
1536 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1537 __per_cpu_end + per_cpu_offset(i));
3c7b4e6b
CM
1538#endif
1539
1540 /*
029aeff5 1541 * Struct page scanning for each node.
3c7b4e6b 1542 */
bfc8c901 1543 get_online_mems();
3c7b4e6b 1544 for_each_online_node(i) {
108bcc96
CS
1545 unsigned long start_pfn = node_start_pfn(i);
1546 unsigned long end_pfn = node_end_pfn(i);
3c7b4e6b
CM
1547 unsigned long pfn;
1548
1549 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1550 struct page *page;
1551
1552 if (!pfn_valid(pfn))
1553 continue;
1554 page = pfn_to_page(pfn);
1555 /* only scan if page is in use */
1556 if (page_count(page) == 0)
1557 continue;
93ada579 1558 scan_block(page, page + 1, NULL);
13ab183d 1559 if (!(pfn & 63))
bde5f6bc 1560 cond_resched();
3c7b4e6b
CM
1561 }
1562 }
bfc8c901 1563 put_online_mems();
3c7b4e6b
CM
1564
1565 /*
43ed5d6e 1566 * Scanning the task stacks (may introduce false negatives).
3c7b4e6b
CM
1567 */
1568 if (kmemleak_stack_scan) {
43ed5d6e
CM
1569 struct task_struct *p, *g;
1570
3c7b4e6b 1571 read_lock(&tasklist_lock);
43ed5d6e 1572 do_each_thread(g, p) {
37df49f4
CM
1573 void *stack = try_get_task_stack(p);
1574 if (stack) {
1575 scan_block(stack, stack + THREAD_SIZE, NULL);
1576 put_task_stack(p);
1577 }
43ed5d6e 1578 } while_each_thread(g, p);
3c7b4e6b
CM
1579 read_unlock(&tasklist_lock);
1580 }
1581
	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();
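
	/*
	 * The second pass above only re-queues white, still allocated objects
	 * whose checksum changed, i.e. objects modified while the first pass
	 * was running; references stored into them during the scan are
	 * followed so that the pointed-to objects are not reported as leaks.
	 */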

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
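	/*
	 * Newly found unreferenced objects are flagged OBJECT_REPORTED so
	 * that they are only counted (and, with kmemleak_verbose, printed to
	 * the kernel log) once; the full reports remain available through the
	 * debugfs file.
	 */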
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
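/*
 * The first scan is delayed by SECS_FIRST_SCAN to let the system settle;
 * subsequent scans run every jiffies_scan_wait (derived from SECS_SCAN_WAIT
 * and adjustable at run-time via a "scan=<secs>" write to the debugfs file).
 */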
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

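/*
 * Reading the /sys/kernel/debug/kmemleak file is implemented as a seq_file
 * walk over object_list: kmemleak_seq_start() takes scan_mutex and the RCU
 * read lock, kmemleak_seq_next() advances along the list holding a reference
 * on each object it returns, kmemleak_seq_show() prints only objects already
 * reported as unreferenced and kmemleak_seq_stop() drops the locks and the
 * last reference.
 */
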
/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

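/*
 * Implements the "dump=<addr>" debugfs command: look up the object at the
 * given address and print its metadata to the kernel log.
 */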
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * Objects are painted grey rather than black so that future scans still
 * traverse them. A black (never scanned) object could hold the only
 * reference to a newly allocated object, which would then be wrongly
 * reported as a leak (a false positive).
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey so they are no longer printed, or free all kmemleak
 *		  objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
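/*
 * A minimal usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo scan > /sys/kernel/debug/kmemleak	- trigger an immediate scan
 *   cat /sys/kernel/debug/kmemleak		- read the pending leak reports
 *   echo clear > /sys/kernel/debug/kmemleak	- mark current reports as grey
 */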
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

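/*
 * Free all the kmemleak metadata objects. Used when kmemleak is being
 * disabled and no leak reports need to be preserved (see the "clear" command
 * handling above and kmemleak_do_cleanup() below).
 */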
static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and, if no memory leaks were found, free
 * the kmemleak internal objects (otherwise, kmemleak may still have some
 * useful information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once the scan thread has stopped, it is safe to no longer track
	 * object freeing. Ordering of the scan thread stopping and the memory
	 * accesses below is guaranteed by the kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
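
/*
 * Hence "kmemleak=off" on the kernel command line disables the detector at
 * boot, while "kmemleak=on" sets kmemleak_skip_disable so that kernels built
 * with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF still start with kmemleak enabled
 * (see kmemleak_init() below).
 */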

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
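	/*
	 * Each early_log entry records a kmemleak_* callback that was invoked
	 * before the object caches above existed; replaying the entries
	 * through the normal API below rebuilds the corresponding metadata.
	 */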
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
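/*
 * Runs as a late_initcall(): registers the debugfs interface backed by
 * kmemleak_fops above and, unless an error occurred earlier, starts the
 * automatic scanning thread.
 */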
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);