]> git.ipfire.org Git - people/ms/linux.git/blame - kernel/kprobes.c
tracing: Use pause-on-trace with the latency tracers
[people/ms/linux.git] / kernel / kprobes.c
CommitLineData
1a59d1b8 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4
LT
2/*
3 * Kernel Probes (KProbes)
4 * kernel/kprobes.c
5 *
1da177e4
LT
6 * Copyright (C) IBM Corporation, 2002, 2004
7 *
8 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
9 * Probes initial implementation (includes suggestions from
10 * Rusty Russell).
11 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
12 * hlists and exceptions notifier as suggested by Andi Kleen.
13 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14 * interface to access function arguments.
15 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
16 * exceptions notifier to be first on the priority list.
b94cce92
HN
17 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
18 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
19 * <prasanna@in.ibm.com> added function-return probes.
1da177e4
LT
20 */
21#include <linux/kprobes.h>
1da177e4
LT
22#include <linux/hash.h>
23#include <linux/init.h>
4e57b681 24#include <linux/slab.h>
e3869792 25#include <linux/stddef.h>
9984de1a 26#include <linux/export.h>
9ec4b1f3 27#include <linux/moduleloader.h>
3a872d89 28#include <linux/kallsyms.h>
b4c6c34a 29#include <linux/freezer.h>
346fd59b
SD
30#include <linux/seq_file.h>
31#include <linux/debugfs.h>
b2be84df 32#include <linux/sysctl.h>
1eeb66a1 33#include <linux/kdebug.h>
4460fdad 34#include <linux/memory.h>
4554dbcb 35#include <linux/ftrace.h>
afd66255 36#include <linux/cpu.h>
bf5438fc 37#include <linux/jump_label.h>
69e49088 38#include <linux/perf_event.h>
bf8f6e5b 39
bfd45be0 40#include <asm/sections.h>
1da177e4
LT
41#include <asm/cacheflush.h>
42#include <asm/errno.h>
7c0f6ba6 43#include <linux/uaccess.h>
1da177e4
LT
44
45#define KPROBE_HASH_BITS 6
46#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
47
3a872d89 48
ef53d9c5 49static int kprobes_initialized;
7e6a71d8
MH
50/* kprobe_table can be accessed by
51 * - Normal hlist traversal and RCU add/del under kprobe_mutex is held.
52 * Or
53 * - RCU hlist traversal under disabling preempt (breakpoint handlers)
54 */
1da177e4
LT
55static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
56
bf8f6e5b 57/* NOTE: change this value only with kprobe_mutex held */
e579abeb 58static bool kprobes_all_disarmed;
bf8f6e5b 59
43948f50
MH
60/* This protects kprobe_table and optimizing_list */
61static DEFINE_MUTEX(kprobe_mutex);
e6584523 62static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
ef53d9c5 63
290e3070
NR
64kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
65 unsigned int __unused)
49e0b465
NR
66{
67 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
68}
69
376e2424
MH
70/* Blacklist -- list of struct kprobe_blacklist_entry */
71static LIST_HEAD(kprobe_blacklist);
3d8d996e 72
2d14e39d 73#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
9ec4b1f3
AM
74/*
75 * kprobe->ainsn.insn points to the copy of the instruction to be
76 * single-stepped. x86_64, POWER4 and above have no-exec support and
77 * stepping on the instruction on a vmalloced/kmalloced/data page
78 * is a recipe for disaster
79 */
9ec4b1f3 80struct kprobe_insn_page {
c5cb5a2d 81 struct list_head list;
9ec4b1f3 82 kprobe_opcode_t *insns; /* Page of instruction slots */
af96397d 83 struct kprobe_insn_cache *cache;
9ec4b1f3 84 int nused;
b4c6c34a 85 int ngarbage;
4610ee1d 86 char slot_used[];
9ec4b1f3
AM
87};
88
4610ee1d
MH
89#define KPROBE_INSN_PAGE_SIZE(slots) \
90 (offsetof(struct kprobe_insn_page, slot_used) + \
91 (sizeof(char) * (slots)))
92
4610ee1d
MH
93static int slots_per_page(struct kprobe_insn_cache *c)
94{
95 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
96}
97
ab40c5c6
MH
98enum kprobe_slot_state {
99 SLOT_CLEAN = 0,
100 SLOT_DIRTY = 1,
101 SLOT_USED = 2,
102};
103
63fef14f 104void __weak *alloc_insn_page(void)
af96397d
HC
105{
106 return module_alloc(PAGE_SIZE);
107}
108
c93f5cf5 109void __weak free_insn_page(void *page)
af96397d 110{
be1f221c 111 module_memfree(page);
af96397d
HC
112}
113
c802d64a
HC
114struct kprobe_insn_cache kprobe_insn_slots = {
115 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
af96397d
HC
116 .alloc = alloc_insn_page,
117 .free = free_insn_page,
d002b8bc 118 .sym = KPROBE_INSN_PAGE_SYM,
4610ee1d
MH
119 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
120 .insn_size = MAX_INSN_SIZE,
121 .nr_garbage = 0,
122};
55479f64 123static int collect_garbage_slots(struct kprobe_insn_cache *c);
b4c6c34a 124
9ec4b1f3 125/**
12941560 126 * __get_insn_slot() - Find a slot on an executable page for an instruction.
9ec4b1f3
AM
127 * We allocate an executable page if there's no room on existing ones.
128 */
55479f64 129kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
9ec4b1f3
AM
130{
131 struct kprobe_insn_page *kip;
c802d64a 132 kprobe_opcode_t *slot = NULL;
9ec4b1f3 133
5b485629 134 /* Since the slot array is not protected by rcu, we need a mutex */
c802d64a 135 mutex_lock(&c->mutex);
6f716acd 136 retry:
5b485629
MH
137 rcu_read_lock();
138 list_for_each_entry_rcu(kip, &c->pages, list) {
4610ee1d 139 if (kip->nused < slots_per_page(c)) {
9ec4b1f3 140 int i;
4610ee1d 141 for (i = 0; i < slots_per_page(c); i++) {
ab40c5c6
MH
142 if (kip->slot_used[i] == SLOT_CLEAN) {
143 kip->slot_used[i] = SLOT_USED;
9ec4b1f3 144 kip->nused++;
c802d64a 145 slot = kip->insns + (i * c->insn_size);
5b485629 146 rcu_read_unlock();
c802d64a 147 goto out;
9ec4b1f3
AM
148 }
149 }
4610ee1d
MH
150 /* kip->nused is broken. Fix it. */
151 kip->nused = slots_per_page(c);
152 WARN_ON(1);
9ec4b1f3
AM
153 }
154 }
5b485629 155 rcu_read_unlock();
9ec4b1f3 156
b4c6c34a 157 /* If there are any garbage slots, collect it and try again. */
4610ee1d 158 if (c->nr_garbage && collect_garbage_slots(c) == 0)
b4c6c34a 159 goto retry;
4610ee1d
MH
160
161 /* All out of space. Need to allocate a new page. */
162 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
6f716acd 163 if (!kip)
c802d64a 164 goto out;
9ec4b1f3
AM
165
166 /*
167 * Use module_alloc so this page is within +/- 2GB of where the
168 * kernel image and loaded module images reside. This is required
169 * so x86_64 can correctly handle the %rip-relative fixups.
170 */
af96397d 171 kip->insns = c->alloc();
9ec4b1f3
AM
172 if (!kip->insns) {
173 kfree(kip);
c802d64a 174 goto out;
9ec4b1f3 175 }
c5cb5a2d 176 INIT_LIST_HEAD(&kip->list);
4610ee1d 177 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
ab40c5c6 178 kip->slot_used[0] = SLOT_USED;
9ec4b1f3 179 kip->nused = 1;
b4c6c34a 180 kip->ngarbage = 0;
af96397d 181 kip->cache = c;
5b485629 182 list_add_rcu(&kip->list, &c->pages);
c802d64a 183 slot = kip->insns;
69e49088
AH
184
185 /* Record the perf ksymbol register event after adding the page */
186 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
187 PAGE_SIZE, false, c->sym);
c802d64a
HC
188out:
189 mutex_unlock(&c->mutex);
190 return slot;
12941560
MH
191}
192
b4c6c34a 193/* Return 1 if all garbages are collected, otherwise 0. */
55479f64 194static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
b4c6c34a 195{
ab40c5c6 196 kip->slot_used[idx] = SLOT_CLEAN;
b4c6c34a
MH
197 kip->nused--;
198 if (kip->nused == 0) {
199 /*
200 * Page is no longer in use. Free it unless
201 * it's the last one. We keep the last one
202 * so as not to have to set it up again the
203 * next time somebody inserts a probe.
204 */
4610ee1d 205 if (!list_is_singular(&kip->list)) {
69e49088
AH
206 /*
207 * Record perf ksymbol unregister event before removing
208 * the page.
209 */
210 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
211 (unsigned long)kip->insns, PAGE_SIZE, true,
212 kip->cache->sym);
5b485629
MH
213 list_del_rcu(&kip->list);
214 synchronize_rcu();
af96397d 215 kip->cache->free(kip->insns);
b4c6c34a
MH
216 kfree(kip);
217 }
218 return 1;
219 }
220 return 0;
221}
222
55479f64 223static int collect_garbage_slots(struct kprobe_insn_cache *c)
b4c6c34a 224{
c5cb5a2d 225 struct kprobe_insn_page *kip, *next;
b4c6c34a 226
615d0ebb 227 /* Ensure no-one is interrupted on the garbages */
ae8b7ce7 228 synchronize_rcu();
b4c6c34a 229
4610ee1d 230 list_for_each_entry_safe(kip, next, &c->pages, list) {
b4c6c34a 231 int i;
b4c6c34a
MH
232 if (kip->ngarbage == 0)
233 continue;
234 kip->ngarbage = 0; /* we will collect all garbages */
4610ee1d 235 for (i = 0; i < slots_per_page(c); i++) {
5b485629 236 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
b4c6c34a
MH
237 break;
238 }
239 }
4610ee1d 240 c->nr_garbage = 0;
b4c6c34a
MH
241 return 0;
242}
243
55479f64
MH
244void __free_insn_slot(struct kprobe_insn_cache *c,
245 kprobe_opcode_t *slot, int dirty)
9ec4b1f3
AM
246{
247 struct kprobe_insn_page *kip;
5b485629 248 long idx;
9ec4b1f3 249
c802d64a 250 mutex_lock(&c->mutex);
5b485629
MH
251 rcu_read_lock();
252 list_for_each_entry_rcu(kip, &c->pages, list) {
253 idx = ((long)slot - (long)kip->insns) /
254 (c->insn_size * sizeof(kprobe_opcode_t));
255 if (idx >= 0 && idx < slots_per_page(c))
c802d64a 256 goto out;
9ec4b1f3 257 }
5b485629 258 /* Could not find this slot. */
4610ee1d 259 WARN_ON(1);
5b485629 260 kip = NULL;
c802d64a 261out:
5b485629
MH
262 rcu_read_unlock();
263 /* Mark and sweep: this may sleep */
264 if (kip) {
265 /* Check double free */
266 WARN_ON(kip->slot_used[idx] != SLOT_USED);
267 if (dirty) {
268 kip->slot_used[idx] = SLOT_DIRTY;
269 kip->ngarbage++;
270 if (++c->nr_garbage > slots_per_page(c))
271 collect_garbage_slots(c);
272 } else {
273 collect_one_slot(kip, idx);
274 }
275 }
c802d64a 276 mutex_unlock(&c->mutex);
4610ee1d 277}
6f716acd 278
5b485629
MH
279/*
280 * Check given address is on the page of kprobe instruction slots.
281 * This will be used for checking whether the address on a stack
282 * is on a text area or not.
283 */
284bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
285{
286 struct kprobe_insn_page *kip;
287 bool ret = false;
288
289 rcu_read_lock();
290 list_for_each_entry_rcu(kip, &c->pages, list) {
291 if (addr >= (unsigned long)kip->insns &&
292 addr < (unsigned long)kip->insns + PAGE_SIZE) {
293 ret = true;
294 break;
295 }
296 }
297 rcu_read_unlock();
298
299 return ret;
300}
301
d002b8bc
AH
302int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
303 unsigned long *value, char *type, char *sym)
304{
305 struct kprobe_insn_page *kip;
306 int ret = -ERANGE;
307
308 rcu_read_lock();
309 list_for_each_entry_rcu(kip, &c->pages, list) {
310 if ((*symnum)--)
311 continue;
312 strlcpy(sym, c->sym, KSYM_NAME_LEN);
313 *type = 't';
314 *value = (unsigned long)kip->insns;
315 ret = 0;
316 break;
317 }
318 rcu_read_unlock();
319
320 return ret;
321}
322
afd66255
MH
323#ifdef CONFIG_OPTPROBES
324/* For optimized_kprobe buffer */
c802d64a
HC
325struct kprobe_insn_cache kprobe_optinsn_slots = {
326 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
af96397d
HC
327 .alloc = alloc_insn_page,
328 .free = free_insn_page,
d002b8bc 329 .sym = KPROBE_OPTINSN_PAGE_SYM,
afd66255
MH
330 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
331 /* .insn_size is initialized later */
332 .nr_garbage = 0,
333};
afd66255 334#endif
2d14e39d 335#endif
9ec4b1f3 336
e6584523
AM
337/* We have preemption disabled.. so it is safe to use __ versions */
338static inline void set_kprobe_instance(struct kprobe *kp)
339{
b76834bc 340 __this_cpu_write(kprobe_instance, kp);
e6584523
AM
341}
342
343static inline void reset_kprobe_instance(void)
344{
b76834bc 345 __this_cpu_write(kprobe_instance, NULL);
e6584523
AM
346}
347
3516a460
AM
348/*
349 * This routine is called either:
49a2a1b8 350 * - under the kprobe_mutex - during kprobe_[un]register()
3516a460 351 * OR
d217d545 352 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
3516a460 353 */
820aede0 354struct kprobe *get_kprobe(void *addr)
1da177e4
LT
355{
356 struct hlist_head *head;
3516a460 357 struct kprobe *p;
1da177e4
LT
358
359 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
6743ad43
MH
360 hlist_for_each_entry_rcu(p, head, hlist,
361 lockdep_is_held(&kprobe_mutex)) {
1da177e4
LT
362 if (p->addr == addr)
363 return p;
364 }
afd66255 365
1da177e4
LT
366 return NULL;
367}
820aede0 368NOKPROBE_SYMBOL(get_kprobe);
1da177e4 369
820aede0 370static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
afd66255
MH
371
372/* Return true if the kprobe is an aggregator */
373static inline int kprobe_aggrprobe(struct kprobe *p)
374{
375 return p->pre_handler == aggr_pre_handler;
376}
377
6274de49
MH
378/* Return true(!0) if the kprobe is unused */
379static inline int kprobe_unused(struct kprobe *p)
380{
381 return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
382 list_empty(&p->list);
383}
384
afd66255
MH
385/*
386 * Keep all fields in the kprobe consistent
387 */
6d8e40a8 388static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
afd66255 389{
6d8e40a8
MH
390 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
391 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
afd66255
MH
392}
393
394#ifdef CONFIG_OPTPROBES
b2be84df
MH
395/* NOTE: change this value only with kprobe_mutex held */
396static bool kprobes_allow_optimization;
397
afd66255
MH
398/*
399 * Call all pre_handler on the list, but ignores its return value.
400 * This must be called from arch-dep optimized caller.
401 */
820aede0 402void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
afd66255
MH
403{
404 struct kprobe *kp;
405
406 list_for_each_entry_rcu(kp, &p->list, list) {
407 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
408 set_kprobe_instance(kp);
4f3a8714 409 kp->pre_handler(kp, regs);
afd66255
MH
410 }
411 reset_kprobe_instance();
412 }
413}
820aede0 414NOKPROBE_SYMBOL(opt_pre_handler);
afd66255 415
6274de49 416/* Free optimized instructions and optimized_kprobe */
55479f64 417static void free_aggr_kprobe(struct kprobe *p)
6274de49
MH
418{
419 struct optimized_kprobe *op;
420
421 op = container_of(p, struct optimized_kprobe, kp);
422 arch_remove_optimized_kprobe(op);
423 arch_remove_kprobe(p);
424 kfree(op);
425}
426
afd66255
MH
427/* Return true(!0) if the kprobe is ready for optimization. */
428static inline int kprobe_optready(struct kprobe *p)
429{
430 struct optimized_kprobe *op;
431
432 if (kprobe_aggrprobe(p)) {
433 op = container_of(p, struct optimized_kprobe, kp);
434 return arch_prepared_optinsn(&op->optinsn);
435 }
436
437 return 0;
438}
439
6274de49
MH
440/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
441static inline int kprobe_disarmed(struct kprobe *p)
442{
443 struct optimized_kprobe *op;
444
445 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */
446 if (!kprobe_aggrprobe(p))
447 return kprobe_disabled(p);
448
449 op = container_of(p, struct optimized_kprobe, kp);
450
451 return kprobe_disabled(p) && list_empty(&op->list);
452}
453
454/* Return true(!0) if the probe is queued on (un)optimizing lists */
55479f64 455static int kprobe_queued(struct kprobe *p)
6274de49
MH
456{
457 struct optimized_kprobe *op;
458
459 if (kprobe_aggrprobe(p)) {
460 op = container_of(p, struct optimized_kprobe, kp);
461 if (!list_empty(&op->list))
462 return 1;
463 }
464 return 0;
465}
466
afd66255
MH
467/*
468 * Return an optimized kprobe whose optimizing code replaces
469 * instructions including addr (exclude breakpoint).
470 */
55479f64 471static struct kprobe *get_optimized_kprobe(unsigned long addr)
afd66255
MH
472{
473 int i;
474 struct kprobe *p = NULL;
475 struct optimized_kprobe *op;
476
477 /* Don't check i == 0, since that is a breakpoint case. */
478 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
479 p = get_kprobe((void *)(addr - i));
480
481 if (p && kprobe_optready(p)) {
482 op = container_of(p, struct optimized_kprobe, kp);
483 if (arch_within_optimized_kprobe(op, addr))
484 return p;
485 }
486
487 return NULL;
488}
489
490/* Optimization staging list, protected by kprobe_mutex */
491static LIST_HEAD(optimizing_list);
6274de49 492static LIST_HEAD(unoptimizing_list);
7b959fc5 493static LIST_HEAD(freeing_list);
afd66255
MH
494
495static void kprobe_optimizer(struct work_struct *work);
496static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
497#define OPTIMIZE_DELAY 5
498
61f4e13f
MH
499/*
500 * Optimize (replace a breakpoint with a jump) kprobes listed on
501 * optimizing_list.
502 */
55479f64 503static void do_optimize_kprobes(void)
afd66255 504{
f1c6ece2 505 lockdep_assert_held(&text_mutex);
afd66255
MH
506 /*
507 * The optimization/unoptimization refers online_cpus via
508 * stop_machine() and cpu-hotplug modifies online_cpus.
509 * And same time, text_mutex will be held in cpu-hotplug and here.
510 * This combination can cause a deadlock (cpu-hotplug try to lock
511 * text_mutex but stop_machine can not be done because online_cpus
512 * has been changed)
2d1e38f5 513 * To avoid this deadlock, caller must have locked cpu hotplug
afd66255
MH
514 * for preventing cpu-hotplug outside of text_mutex locking.
515 */
2d1e38f5
TG
516 lockdep_assert_cpus_held();
517
518 /* Optimization never be done when disarmed */
519 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
520 list_empty(&optimizing_list))
521 return;
522
cd7ebe22 523 arch_optimize_kprobes(&optimizing_list);
61f4e13f
MH
524}
525
6274de49
MH
526/*
527 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
528 * if need) kprobes listed on unoptimizing_list.
529 */
55479f64 530static void do_unoptimize_kprobes(void)
6274de49
MH
531{
532 struct optimized_kprobe *op, *tmp;
533
f1c6ece2 534 lockdep_assert_held(&text_mutex);
2d1e38f5
TG
535 /* See comment in do_optimize_kprobes() */
536 lockdep_assert_cpus_held();
537
6274de49
MH
538 /* Unoptimization must be done anytime */
539 if (list_empty(&unoptimizing_list))
540 return;
541
7b959fc5 542 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
f984ba4e 543 /* Loop free_list for disarming */
7b959fc5 544 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
f66c0447
MH
545 /* Switching from detour code to origin */
546 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
6274de49
MH
547 /* Disarm probes if marked disabled */
548 if (kprobe_disabled(&op->kp))
549 arch_disarm_kprobe(&op->kp);
550 if (kprobe_unused(&op->kp)) {
551 /*
552 * Remove unused probes from hash list. After waiting
553 * for synchronization, these probes are reclaimed.
554 * (reclaiming is done by do_free_cleaned_kprobes.)
555 */
556 hlist_del_rcu(&op->kp.hlist);
6274de49
MH
557 } else
558 list_del_init(&op->list);
559 }
6274de49
MH
560}
561
562/* Reclaim all kprobes on the free_list */
55479f64 563static void do_free_cleaned_kprobes(void)
6274de49
MH
564{
565 struct optimized_kprobe *op, *tmp;
566
7b959fc5 567 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
6274de49 568 list_del_init(&op->list);
cbdd96f5
MH
569 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
570 /*
571 * This must not happen, but if there is a kprobe
572 * still in use, keep it on kprobes hash list.
573 */
574 continue;
575 }
6274de49
MH
576 free_aggr_kprobe(&op->kp);
577 }
578}
579
580/* Start optimizer after OPTIMIZE_DELAY passed */
55479f64 581static void kick_kprobe_optimizer(void)
6274de49 582{
ad72b3be 583 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
6274de49
MH
584}
585
61f4e13f 586/* Kprobe jump optimizer */
55479f64 587static void kprobe_optimizer(struct work_struct *work)
61f4e13f 588{
72ef3794 589 mutex_lock(&kprobe_mutex);
2d1e38f5 590 cpus_read_lock();
f1c6ece2 591 mutex_lock(&text_mutex);
61f4e13f
MH
592
593 /*
6274de49
MH
594 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
595 * kprobes before waiting for quiesence period.
596 */
7b959fc5 597 do_unoptimize_kprobes();
6274de49
MH
598
599 /*
a30b85df
MH
600 * Step 2: Wait for quiesence period to ensure all potentially
601 * preempted tasks to have normally scheduled. Because optprobe
602 * may modify multiple instructions, there is a chance that Nth
603 * instruction is preempted. In that case, such tasks can return
604 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
605 * Note that on non-preemptive kernel, this is transparently converted
606 * to synchronoze_sched() to wait for all interrupts to have completed.
61f4e13f 607 */
a30b85df 608 synchronize_rcu_tasks();
61f4e13f 609
6274de49 610 /* Step 3: Optimize kprobes after quiesence period */
61f4e13f 611 do_optimize_kprobes();
6274de49
MH
612
613 /* Step 4: Free cleaned kprobes after quiesence period */
7b959fc5 614 do_free_cleaned_kprobes();
6274de49 615
f1c6ece2 616 mutex_unlock(&text_mutex);
2d1e38f5 617 cpus_read_unlock();
6274de49 618
cd7ebe22 619 /* Step 5: Kick optimizer again if needed */
f984ba4e 620 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
cd7ebe22 621 kick_kprobe_optimizer();
1a0aa991
MH
622
623 mutex_unlock(&kprobe_mutex);
6274de49
MH
624}
625
626/* Wait for completing optimization and unoptimization */
30e7d894 627void wait_for_kprobe_optimizer(void)
6274de49 628{
ad72b3be
TH
629 mutex_lock(&kprobe_mutex);
630
631 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
632 mutex_unlock(&kprobe_mutex);
633
634 /* this will also make optimizing_work execute immmediately */
635 flush_delayed_work(&optimizing_work);
636 /* @optimizing_work might not have been queued yet, relax */
637 cpu_relax();
638
639 mutex_lock(&kprobe_mutex);
640 }
641
642 mutex_unlock(&kprobe_mutex);
afd66255
MH
643}
644
e4add247
MH
645static bool optprobe_queued_unopt(struct optimized_kprobe *op)
646{
647 struct optimized_kprobe *_op;
648
649 list_for_each_entry(_op, &unoptimizing_list, list) {
650 if (op == _op)
651 return true;
652 }
653
654 return false;
655}
656
afd66255 657/* Optimize kprobe if p is ready to be optimized */
55479f64 658static void optimize_kprobe(struct kprobe *p)
afd66255
MH
659{
660 struct optimized_kprobe *op;
661
662 /* Check if the kprobe is disabled or not ready for optimization. */
b2be84df 663 if (!kprobe_optready(p) || !kprobes_allow_optimization ||
afd66255
MH
664 (kprobe_disabled(p) || kprobes_all_disarmed))
665 return;
666
059053a2
MH
667 /* kprobes with post_handler can not be optimized */
668 if (p->post_handler)
afd66255
MH
669 return;
670
671 op = container_of(p, struct optimized_kprobe, kp);
672
673 /* Check there is no other kprobes at the optimized instructions */
674 if (arch_check_optimized_kprobe(op) < 0)
675 return;
676
677 /* Check if it is already optimized. */
e4add247
MH
678 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
679 if (optprobe_queued_unopt(op)) {
680 /* This is under unoptimizing. Just dequeue the probe */
681 list_del_init(&op->list);
682 }
afd66255 683 return;
e4add247 684 }
afd66255 685 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
6274de49 686
e4add247
MH
687 /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
688 if (WARN_ON_ONCE(!list_empty(&op->list)))
689 return;
690
691 list_add(&op->list, &optimizing_list);
692 kick_kprobe_optimizer();
6274de49
MH
693}
694
695/* Short cut to direct unoptimizing */
55479f64 696static void force_unoptimize_kprobe(struct optimized_kprobe *op)
6274de49 697{
2d1e38f5 698 lockdep_assert_cpus_held();
6274de49 699 arch_unoptimize_kprobe(op);
f66c0447 700 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
afd66255
MH
701}
702
703/* Unoptimize a kprobe if p is optimized */
55479f64 704static void unoptimize_kprobe(struct kprobe *p, bool force)
afd66255
MH
705{
706 struct optimized_kprobe *op;
707
6274de49
MH
708 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
709 return; /* This is not an optprobe nor optimized */
710
711 op = container_of(p, struct optimized_kprobe, kp);
e4add247 712 if (!kprobe_optimized(p))
6274de49 713 return;
6274de49 714
6274de49 715 if (!list_empty(&op->list)) {
e4add247
MH
716 if (optprobe_queued_unopt(op)) {
717 /* Queued in unoptimizing queue */
718 if (force) {
719 /*
720 * Forcibly unoptimize the kprobe here, and queue it
721 * in the freeing list for release afterwards.
722 */
723 force_unoptimize_kprobe(op);
724 list_move(&op->list, &freeing_list);
725 }
726 } else {
727 /* Dequeue from the optimizing queue */
728 list_del_init(&op->list);
729 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
730 }
6274de49
MH
731 return;
732 }
e4add247 733
6274de49 734 /* Optimized kprobe case */
e4add247 735 if (force) {
6274de49
MH
736 /* Forcibly update the code: this is a special case */
737 force_unoptimize_kprobe(op);
e4add247 738 } else {
6274de49
MH
739 list_add(&op->list, &unoptimizing_list);
740 kick_kprobe_optimizer();
afd66255
MH
741 }
742}
743
0490cd1f 744/* Cancel unoptimizing for reusing */
819319fc 745static int reuse_unused_kprobe(struct kprobe *ap)
0490cd1f
MH
746{
747 struct optimized_kprobe *op;
748
0490cd1f
MH
749 /*
750 * Unused kprobe MUST be on the way of delayed unoptimizing (means
751 * there is still a relative jump) and disabled.
752 */
753 op = container_of(ap, struct optimized_kprobe, kp);
4458515b 754 WARN_ON_ONCE(list_empty(&op->list));
0490cd1f
MH
755 /* Enable the probe again */
756 ap->flags &= ~KPROBE_FLAG_DISABLED;
757 /* Optimize it again (remove from op->list) */
5f843ed4
MH
758 if (!kprobe_optready(ap))
759 return -EINVAL;
819319fc 760
0490cd1f 761 optimize_kprobe(ap);
819319fc 762 return 0;
0490cd1f
MH
763}
764
afd66255 765/* Remove optimized instructions */
55479f64 766static void kill_optimized_kprobe(struct kprobe *p)
afd66255
MH
767{
768 struct optimized_kprobe *op;
769
770 op = container_of(p, struct optimized_kprobe, kp);
6274de49
MH
771 if (!list_empty(&op->list))
772 /* Dequeue from the (un)optimization queue */
afd66255 773 list_del_init(&op->list);
6274de49 774 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
7b959fc5
MH
775
776 if (kprobe_unused(p)) {
777 /* Enqueue if it is unused */
778 list_add(&op->list, &freeing_list);
779 /*
780 * Remove unused probes from the hash list. After waiting
781 * for synchronization, this probe is reclaimed.
782 * (reclaiming is done by do_free_cleaned_kprobes().)
783 */
784 hlist_del_rcu(&op->kp.hlist);
785 }
786
6274de49 787 /* Don't touch the code, because it is already freed. */
afd66255
MH
788 arch_remove_optimized_kprobe(op);
789}
790
a460246c
MH
791static inline
792void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
793{
794 if (!kprobe_ftrace(p))
795 arch_prepare_optimized_kprobe(op, p);
796}
797
afd66255 798/* Try to prepare optimized instructions */
55479f64 799static void prepare_optimized_kprobe(struct kprobe *p)
afd66255
MH
800{
801 struct optimized_kprobe *op;
802
803 op = container_of(p, struct optimized_kprobe, kp);
a460246c 804 __prepare_optimized_kprobe(op, p);
afd66255
MH
805}
806
afd66255 807/* Allocate new optimized_kprobe and try to prepare optimized instructions */
55479f64 808static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
afd66255
MH
809{
810 struct optimized_kprobe *op;
811
812 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
813 if (!op)
814 return NULL;
815
816 INIT_LIST_HEAD(&op->list);
817 op->kp.addr = p->addr;
a460246c 818 __prepare_optimized_kprobe(op, p);
afd66255
MH
819
820 return &op->kp;
821}
822
55479f64 823static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
afd66255
MH
824
825/*
826 * Prepare an optimized_kprobe and optimize it
827 * NOTE: p must be a normal registered kprobe
828 */
55479f64 829static void try_to_optimize_kprobe(struct kprobe *p)
afd66255
MH
830{
831 struct kprobe *ap;
832 struct optimized_kprobe *op;
833
ae6aa16f
MH
834 /* Impossible to optimize ftrace-based kprobe */
835 if (kprobe_ftrace(p))
836 return;
837
25764288 838 /* For preparing optimization, jump_label_text_reserved() is called */
2d1e38f5 839 cpus_read_lock();
25764288
MH
840 jump_label_lock();
841 mutex_lock(&text_mutex);
842
afd66255
MH
843 ap = alloc_aggr_kprobe(p);
844 if (!ap)
25764288 845 goto out;
afd66255
MH
846
847 op = container_of(ap, struct optimized_kprobe, kp);
848 if (!arch_prepared_optinsn(&op->optinsn)) {
849 /* If failed to setup optimizing, fallback to kprobe */
6274de49
MH
850 arch_remove_optimized_kprobe(op);
851 kfree(op);
25764288 852 goto out;
afd66255
MH
853 }
854
855 init_aggr_kprobe(ap, p);
25764288
MH
856 optimize_kprobe(ap); /* This just kicks optimizer thread */
857
858out:
859 mutex_unlock(&text_mutex);
860 jump_label_unlock();
2d1e38f5 861 cpus_read_unlock();
afd66255
MH
862}
863
b2be84df 864#ifdef CONFIG_SYSCTL
55479f64 865static void optimize_all_kprobes(void)
b2be84df
MH
866{
867 struct hlist_head *head;
b2be84df
MH
868 struct kprobe *p;
869 unsigned int i;
870
5c51543b 871 mutex_lock(&kprobe_mutex);
b2be84df
MH
872 /* If optimization is already allowed, just return */
873 if (kprobes_allow_optimization)
5c51543b 874 goto out;
b2be84df 875
2d1e38f5 876 cpus_read_lock();
b2be84df 877 kprobes_allow_optimization = true;
b2be84df
MH
878 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
879 head = &kprobe_table[i];
7e6a71d8 880 hlist_for_each_entry(p, head, hlist)
b2be84df
MH
881 if (!kprobe_disabled(p))
882 optimize_kprobe(p);
883 }
2d1e38f5 884 cpus_read_unlock();
b2be84df 885 printk(KERN_INFO "Kprobes globally optimized\n");
5c51543b
MH
886out:
887 mutex_unlock(&kprobe_mutex);
b2be84df
MH
888}
889
55479f64 890static void unoptimize_all_kprobes(void)
b2be84df
MH
891{
892 struct hlist_head *head;
b2be84df
MH
893 struct kprobe *p;
894 unsigned int i;
895
5c51543b 896 mutex_lock(&kprobe_mutex);
b2be84df 897 /* If optimization is already prohibited, just return */
5c51543b
MH
898 if (!kprobes_allow_optimization) {
899 mutex_unlock(&kprobe_mutex);
b2be84df 900 return;
5c51543b 901 }
b2be84df 902
2d1e38f5 903 cpus_read_lock();
b2be84df 904 kprobes_allow_optimization = false;
b2be84df
MH
905 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
906 head = &kprobe_table[i];
7e6a71d8 907 hlist_for_each_entry(p, head, hlist) {
b2be84df 908 if (!kprobe_disabled(p))
6274de49 909 unoptimize_kprobe(p, false);
b2be84df
MH
910 }
911 }
2d1e38f5 912 cpus_read_unlock();
5c51543b
MH
913 mutex_unlock(&kprobe_mutex);
914
6274de49
MH
915 /* Wait for unoptimizing completion */
916 wait_for_kprobe_optimizer();
917 printk(KERN_INFO "Kprobes globally unoptimized\n");
b2be84df
MH
918}
919
5c51543b 920static DEFINE_MUTEX(kprobe_sysctl_mutex);
b2be84df
MH
921int sysctl_kprobes_optimization;
922int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
32927393 923 void *buffer, size_t *length,
b2be84df
MH
924 loff_t *ppos)
925{
926 int ret;
927
5c51543b 928 mutex_lock(&kprobe_sysctl_mutex);
b2be84df
MH
929 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
930 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
931
932 if (sysctl_kprobes_optimization)
933 optimize_all_kprobes();
934 else
935 unoptimize_all_kprobes();
5c51543b 936 mutex_unlock(&kprobe_sysctl_mutex);
b2be84df
MH
937
938 return ret;
939}
940#endif /* CONFIG_SYSCTL */
941
6274de49 942/* Put a breakpoint for a probe. Must be called with text_mutex locked */
55479f64 943static void __arm_kprobe(struct kprobe *p)
afd66255 944{
6d8e40a8 945 struct kprobe *_p;
afd66255
MH
946
947 /* Check collision with other optimized kprobes */
6d8e40a8
MH
948 _p = get_optimized_kprobe((unsigned long)p->addr);
949 if (unlikely(_p))
6274de49
MH
950 /* Fallback to unoptimized kprobe */
951 unoptimize_kprobe(_p, true);
afd66255
MH
952
953 arch_arm_kprobe(p);
954 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
955}
956
6274de49 957/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
55479f64 958static void __disarm_kprobe(struct kprobe *p, bool reopt)
afd66255 959{
6d8e40a8 960 struct kprobe *_p;
afd66255 961
69d54b91
WN
962 /* Try to unoptimize */
963 unoptimize_kprobe(p, kprobes_all_disarmed);
afd66255 964
6274de49
MH
965 if (!kprobe_queued(p)) {
966 arch_disarm_kprobe(p);
967 /* If another kprobe was blocked, optimize it. */
968 _p = get_optimized_kprobe((unsigned long)p->addr);
969 if (unlikely(_p) && reopt)
970 optimize_kprobe(_p);
971 }
972 /* TODO: reoptimize others after unoptimized this probe */
afd66255
MH
973}
974
975#else /* !CONFIG_OPTPROBES */
976
977#define optimize_kprobe(p) do {} while (0)
6274de49 978#define unoptimize_kprobe(p, f) do {} while (0)
afd66255
MH
979#define kill_optimized_kprobe(p) do {} while (0)
980#define prepare_optimized_kprobe(p) do {} while (0)
981#define try_to_optimize_kprobe(p) do {} while (0)
982#define __arm_kprobe(p) arch_arm_kprobe(p)
6274de49
MH
983#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
984#define kprobe_disarmed(p) kprobe_disabled(p)
985#define wait_for_kprobe_optimizer() do {} while (0)
afd66255 986
819319fc 987static int reuse_unused_kprobe(struct kprobe *ap)
0490cd1f 988{
819319fc
MH
989 /*
990 * If the optimized kprobe is NOT supported, the aggr kprobe is
991 * released at the same time that the last aggregated kprobe is
992 * unregistered.
993 * Thus there should be no chance to reuse unused kprobe.
994 */
0490cd1f 995 printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
819319fc 996 return -EINVAL;
0490cd1f
MH
997}
998
55479f64 999static void free_aggr_kprobe(struct kprobe *p)
afd66255 1000{
6274de49 1001 arch_remove_kprobe(p);
afd66255
MH
1002 kfree(p);
1003}
1004
55479f64 1005static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
afd66255
MH
1006{
1007 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1008}
1009#endif /* CONFIG_OPTPROBES */
1010
e7dbfe34 1011#ifdef CONFIG_KPROBES_ON_FTRACE
ae6aa16f 1012static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
0bc11ed5
MH
1013 .func = kprobe_ftrace_handler,
1014 .flags = FTRACE_OPS_FL_SAVE_REGS,
1015};
1016
1017static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
e5253896 1018 .func = kprobe_ftrace_handler,
1d70be34 1019 .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
ae6aa16f 1020};
0bc11ed5
MH
1021
1022static int kprobe_ipmodify_enabled;
ae6aa16f
MH
1023static int kprobe_ftrace_enabled;
1024
1025/* Must ensure p->addr is really on ftrace */
55479f64 1026static int prepare_kprobe(struct kprobe *p)
ae6aa16f
MH
1027{
1028 if (!kprobe_ftrace(p))
1029 return arch_prepare_kprobe(p);
1030
1031 return arch_prepare_kprobe_ftrace(p);
1032}
1033
1034/* Caller must lock kprobe_mutex */
0bc11ed5
MH
1035static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1036 int *cnt)
ae6aa16f 1037{
12310e34 1038 int ret = 0;
ae6aa16f 1039
0bc11ed5 1040 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
12310e34 1041 if (ret) {
4458515b
MH
1042 pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
1043 p->addr, ret);
12310e34
JY
1044 return ret;
1045 }
1046
0bc11ed5
MH
1047 if (*cnt == 0) {
1048 ret = register_ftrace_function(ops);
12310e34
JY
1049 if (ret) {
1050 pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
1051 goto err_ftrace;
1052 }
ae6aa16f 1053 }
12310e34 1054
0bc11ed5 1055 (*cnt)++;
12310e34
JY
1056 return ret;
1057
1058err_ftrace:
1059 /*
0bc11ed5
MH
1060 * At this point, sinec ops is not registered, we should be sefe from
1061 * registering empty filter.
12310e34 1062 */
0bc11ed5 1063 ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
12310e34 1064 return ret;
ae6aa16f
MH
1065}
1066
0bc11ed5
MH
1067static int arm_kprobe_ftrace(struct kprobe *p)
1068{
1069 bool ipmodify = (p->post_handler != NULL);
1070
1071 return __arm_kprobe_ftrace(p,
1072 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1073 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1074}
1075
ae6aa16f 1076/* Caller must lock kprobe_mutex */
0bc11ed5
MH
1077static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1078 int *cnt)
ae6aa16f 1079{
297f9233 1080 int ret = 0;
ae6aa16f 1081
0bc11ed5
MH
1082 if (*cnt == 1) {
1083 ret = unregister_ftrace_function(ops);
297f9233
JY
1084 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1085 return ret;
ae6aa16f 1086 }
297f9233 1087
0bc11ed5 1088 (*cnt)--;
297f9233 1089
0bc11ed5 1090 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
4458515b
MH
1091 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
1092 p->addr, ret);
297f9233 1093 return ret;
ae6aa16f 1094}
0bc11ed5
MH
1095
1096static int disarm_kprobe_ftrace(struct kprobe *p)
1097{
1098 bool ipmodify = (p->post_handler != NULL);
1099
1100 return __disarm_kprobe_ftrace(p,
1101 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1102 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1103}
e7dbfe34 1104#else /* !CONFIG_KPROBES_ON_FTRACE */
10de795a
MS
1105static inline int prepare_kprobe(struct kprobe *p)
1106{
1107 return arch_prepare_kprobe(p);
1108}
1109
1110static inline int arm_kprobe_ftrace(struct kprobe *p)
1111{
1112 return -ENODEV;
1113}
1114
1115static inline int disarm_kprobe_ftrace(struct kprobe *p)
1116{
1117 return -ENODEV;
1118}
ae6aa16f
MH
1119#endif
1120
201517a7 1121/* Arm a kprobe with text_mutex */
12310e34 1122static int arm_kprobe(struct kprobe *kp)
201517a7 1123{
12310e34
JY
1124 if (unlikely(kprobe_ftrace(kp)))
1125 return arm_kprobe_ftrace(kp);
1126
2d1e38f5 1127 cpus_read_lock();
201517a7 1128 mutex_lock(&text_mutex);
afd66255 1129 __arm_kprobe(kp);
201517a7 1130 mutex_unlock(&text_mutex);
2d1e38f5 1131 cpus_read_unlock();
12310e34
JY
1132
1133 return 0;
201517a7
MH
1134}
1135
1136/* Disarm a kprobe with text_mutex */
297f9233 1137static int disarm_kprobe(struct kprobe *kp, bool reopt)
201517a7 1138{
297f9233
JY
1139 if (unlikely(kprobe_ftrace(kp)))
1140 return disarm_kprobe_ftrace(kp);
2d1e38f5
TG
1141
1142 cpus_read_lock();
201517a7 1143 mutex_lock(&text_mutex);
ae6aa16f 1144 __disarm_kprobe(kp, reopt);
201517a7 1145 mutex_unlock(&text_mutex);
2d1e38f5 1146 cpus_read_unlock();
297f9233
JY
1147
1148 return 0;
201517a7
MH
1149}
1150
64f562c6
AM
1151/*
1152 * Aggregate handlers for multiple kprobes support - these handlers
1153 * take care of invoking the individual kprobe handlers on p->list
1154 */
820aede0 1155static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
64f562c6
AM
1156{
1157 struct kprobe *kp;
1158
3516a460 1159 list_for_each_entry_rcu(kp, &p->list, list) {
de5bd88d 1160 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
e6584523 1161 set_kprobe_instance(kp);
8b0914ea
PP
1162 if (kp->pre_handler(kp, regs))
1163 return 1;
64f562c6 1164 }
e6584523 1165 reset_kprobe_instance();
64f562c6
AM
1166 }
1167 return 0;
1168}
820aede0 1169NOKPROBE_SYMBOL(aggr_pre_handler);
64f562c6 1170
820aede0
MH
1171static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1172 unsigned long flags)
64f562c6
AM
1173{
1174 struct kprobe *kp;
1175
3516a460 1176 list_for_each_entry_rcu(kp, &p->list, list) {
de5bd88d 1177 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
e6584523 1178 set_kprobe_instance(kp);
64f562c6 1179 kp->post_handler(kp, regs, flags);
e6584523 1180 reset_kprobe_instance();
64f562c6
AM
1181 }
1182 }
64f562c6 1183}
820aede0 1184NOKPROBE_SYMBOL(aggr_post_handler);
64f562c6 1185
820aede0
MH
1186static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1187 int trapnr)
64f562c6 1188{
b76834bc 1189 struct kprobe *cur = __this_cpu_read(kprobe_instance);
e6584523 1190
64f562c6
AM
1191 /*
1192 * if we faulted "during" the execution of a user specified
1193 * probe handler, invoke just that probe's fault handler
1194 */
e6584523
AM
1195 if (cur && cur->fault_handler) {
1196 if (cur->fault_handler(cur, regs, trapnr))
64f562c6
AM
1197 return 1;
1198 }
1199 return 0;
1200}
820aede0 1201NOKPROBE_SYMBOL(aggr_fault_handler);
64f562c6 1202
bf8d5c52 1203/* Walks the list and increments nmissed count for multiprobe case */
820aede0 1204void kprobes_inc_nmissed_count(struct kprobe *p)
bf8d5c52
KA
1205{
1206 struct kprobe *kp;
afd66255 1207 if (!kprobe_aggrprobe(p)) {
bf8d5c52
KA
1208 p->nmissed++;
1209 } else {
1210 list_for_each_entry_rcu(kp, &p->list, list)
1211 kp->nmissed++;
1212 }
1213 return;
1214}
820aede0 1215NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
bf8d5c52 1216
d741bf41
PZ
1217static void free_rp_inst_rcu(struct rcu_head *head)
1218{
1219 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1220
1221 if (refcount_dec_and_test(&ri->rph->ref))
1222 kfree(ri->rph);
1223 kfree(ri);
1224}
1225NOKPROBE_SYMBOL(free_rp_inst_rcu);
1226
b3388178 1227static void recycle_rp_inst(struct kretprobe_instance *ri)
b94cce92 1228{
d741bf41 1229 struct kretprobe *rp = get_kretprobe(ri);
ef53d9c5 1230
ef53d9c5 1231 if (likely(rp)) {
6e426e0f 1232 freelist_add(&ri->freelist, &rp->freelist);
b94cce92 1233 } else
d741bf41 1234 call_rcu(&ri->rcu, free_rp_inst_rcu);
b94cce92 1235}
820aede0 1236NOKPROBE_SYMBOL(recycle_rp_inst);
b94cce92 1237
319f0ce2 1238static struct kprobe kprobe_busy = {
9b38cc70
JO
1239 .addr = (void *) get_kprobe,
1240};
1241
1242void kprobe_busy_begin(void)
1243{
1244 struct kprobe_ctlblk *kcb;
1245
1246 preempt_disable();
1247 __this_cpu_write(current_kprobe, &kprobe_busy);
1248 kcb = get_kprobe_ctlblk();
1249 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1250}
1251
1252void kprobe_busy_end(void)
1253{
1254 __this_cpu_write(current_kprobe, NULL);
1255 preempt_enable();
1256}
1257
b94cce92 1258/*
c6fd91f0 1259 * This function is called from finish_task_switch when task tk becomes dead,
1260 * so that we can recycle any function-return probe instances associated
1261 * with this task. These left over instances represent probed functions
1262 * that have been called but will never return.
b94cce92 1263 */
820aede0 1264void kprobe_flush_task(struct task_struct *tk)
b94cce92 1265{
62c27be0 1266 struct kretprobe_instance *ri;
d741bf41 1267 struct llist_node *node;
802eae7c 1268
d741bf41 1269 /* Early boot, not yet initialized. */
ef53d9c5 1270 if (unlikely(!kprobes_initialized))
ef53d9c5
S
1271 return;
1272
9b38cc70
JO
1273 kprobe_busy_begin();
1274
d741bf41
PZ
1275 node = __llist_del_all(&tk->kretprobe_instances);
1276 while (node) {
1277 ri = container_of(node, struct kretprobe_instance, llist);
1278 node = node->next;
1279
1280 recycle_rp_inst(ri);
62c27be0 1281 }
9b38cc70
JO
1282
1283 kprobe_busy_end();
b94cce92 1284}
820aede0 1285NOKPROBE_SYMBOL(kprobe_flush_task);
b94cce92 1286
b94cce92
HN
1287static inline void free_rp_inst(struct kretprobe *rp)
1288{
1289 struct kretprobe_instance *ri;
6e426e0f 1290 struct freelist_node *node;
d741bf41 1291 int count = 0;
4c4308cb 1292
6e426e0f
PZ
1293 node = rp->freelist.head;
1294 while (node) {
1295 ri = container_of(node, struct kretprobe_instance, freelist);
1296 node = node->next;
1297
b94cce92 1298 kfree(ri);
d741bf41 1299 count++;
b94cce92 1300 }
ef53d9c5 1301
d741bf41
PZ
1302 if (refcount_sub_and_test(count, &rp->rph->ref)) {
1303 kfree(rp->rph);
1304 rp->rph = NULL;
4a296e07 1305 }
4a296e07
MH
1306}
1307
059053a2 1308/* Add the new probe to ap->list */
55479f64 1309static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
8b0914ea 1310{
059053a2 1311 if (p->post_handler)
6274de49 1312 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
afd66255 1313
059053a2 1314 list_add_rcu(&p->list, &ap->list);
b918e5e6
MH
1315 if (p->post_handler && !ap->post_handler)
1316 ap->post_handler = aggr_post_handler;
de5bd88d 1317
8b0914ea
PP
1318 return 0;
1319}
1320
64f562c6
AM
1321/*
1322 * Fill in the required fields of the "manager kprobe". Replace the
1323 * earlier kprobe in the hlist with the manager kprobe
1324 */
55479f64 1325static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
64f562c6 1326{
afd66255 1327 /* Copy p's insn slot to ap */
8b0914ea 1328 copy_kprobe(p, ap);
a9ad965e 1329 flush_insn_slot(ap);
64f562c6 1330 ap->addr = p->addr;
afd66255 1331 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
64f562c6 1332 ap->pre_handler = aggr_pre_handler;
64f562c6 1333 ap->fault_handler = aggr_fault_handler;
e8386a0c
MH
1334 /* We don't care the kprobe which has gone. */
1335 if (p->post_handler && !kprobe_gone(p))
36721656 1336 ap->post_handler = aggr_post_handler;
64f562c6
AM
1337
1338 INIT_LIST_HEAD(&ap->list);
afd66255 1339 INIT_HLIST_NODE(&ap->hlist);
64f562c6 1340
afd66255 1341 list_add_rcu(&p->list, &ap->list);
adad0f33 1342 hlist_replace_rcu(&p->hlist, &ap->hlist);
64f562c6
AM
1343}
1344
1345/*
1346 * This is the second or subsequent kprobe at the address - handle
1347 * the intricacies
64f562c6 1348 */
55479f64 1349static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
64f562c6
AM
1350{
1351 int ret = 0;
6d8e40a8 1352 struct kprobe *ap = orig_p;
64f562c6 1353
2d1e38f5
TG
1354 cpus_read_lock();
1355
25764288
MH
1356 /* For preparing optimization, jump_label_text_reserved() is called */
1357 jump_label_lock();
25764288
MH
1358 mutex_lock(&text_mutex);
1359
6d8e40a8
MH
1360 if (!kprobe_aggrprobe(orig_p)) {
1361 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1362 ap = alloc_aggr_kprobe(orig_p);
25764288
MH
1363 if (!ap) {
1364 ret = -ENOMEM;
1365 goto out;
1366 }
6d8e40a8 1367 init_aggr_kprobe(ap, orig_p);
819319fc 1368 } else if (kprobe_unused(ap)) {
0490cd1f 1369 /* This probe is going to die. Rescue it */
819319fc
MH
1370 ret = reuse_unused_kprobe(ap);
1371 if (ret)
1372 goto out;
1373 }
b918e5e6
MH
1374
1375 if (kprobe_gone(ap)) {
e8386a0c
MH
1376 /*
1377 * Attempting to insert new probe at the same location that
1378 * had a probe in the module vaddr area which already
1379 * freed. So, the instruction slot has already been
1380 * released. We need a new slot for the new probe.
1381 */
b918e5e6 1382 ret = arch_prepare_kprobe(ap);
e8386a0c 1383 if (ret)
b918e5e6
MH
1384 /*
1385 * Even if fail to allocate new slot, don't need to
1386 * free aggr_probe. It will be used next time, or
1387 * freed by unregister_kprobe.
1388 */
25764288 1389 goto out;
de5bd88d 1390
afd66255
MH
1391 /* Prepare optimized instructions if possible. */
1392 prepare_optimized_kprobe(ap);
1393
e8386a0c 1394 /*
de5bd88d
MH
1395 * Clear gone flag to prevent allocating new slot again, and
1396 * set disabled flag because it is not armed yet.
e8386a0c 1397 */
de5bd88d
MH
1398 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1399 | KPROBE_FLAG_DISABLED;
e8386a0c 1400 }
b918e5e6 1401
afd66255 1402 /* Copy ap's insn slot to p */
b918e5e6 1403 copy_kprobe(ap, p);
25764288
MH
1404 ret = add_new_kprobe(ap, p);
1405
1406out:
1407 mutex_unlock(&text_mutex);
25764288 1408 jump_label_unlock();
2d1e38f5 1409 cpus_read_unlock();
25764288
MH
1410
1411 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1412 ap->flags &= ~KPROBE_FLAG_DISABLED;
12310e34 1413 if (!kprobes_all_disarmed) {
25764288 1414 /* Arm the breakpoint again. */
12310e34
JY
1415 ret = arm_kprobe(ap);
1416 if (ret) {
1417 ap->flags |= KPROBE_FLAG_DISABLED;
1418 list_del_rcu(&p->list);
ae8b7ce7 1419 synchronize_rcu();
12310e34
JY
1420 }
1421 }
25764288
MH
1422 }
1423 return ret;
64f562c6
AM
1424}
1425
be8f2743
MH
1426bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1427{
1428 /* The __kprobes marked functions and entry code must not be probed */
1429 return addr >= (unsigned long)__kprobes_text_start &&
1430 addr < (unsigned long)__kprobes_text_end;
1431}
1432
6143c6fb 1433static bool __within_kprobe_blacklist(unsigned long addr)
d0aaff97 1434{
376e2424 1435 struct kprobe_blacklist_entry *ent;
3d8d996e 1436
be8f2743 1437 if (arch_within_kprobe_blacklist(addr))
376e2424 1438 return true;
3d8d996e
SD
1439 /*
1440 * If there exists a kprobe_blacklist, verify and
1441 * fail any probe registration in the prohibited area
1442 */
376e2424
MH
1443 list_for_each_entry(ent, &kprobe_blacklist, list) {
1444 if (addr >= ent->start_addr && addr < ent->end_addr)
1445 return true;
3d8d996e 1446 }
6143c6fb
MH
1447 return false;
1448}
376e2424 1449
6143c6fb
MH
1450bool within_kprobe_blacklist(unsigned long addr)
1451{
1452 char symname[KSYM_NAME_LEN], *p;
1453
1454 if (__within_kprobe_blacklist(addr))
1455 return true;
1456
1457 /* Check if the address is on a suffixed-symbol */
1458 if (!lookup_symbol_name(addr, symname)) {
1459 p = strchr(symname, '.');
1460 if (!p)
1461 return false;
1462 *p = '\0';
1463 addr = (unsigned long)kprobe_lookup_name(symname, 0);
1464 if (addr)
1465 return __within_kprobe_blacklist(addr);
1466 }
376e2424 1467 return false;
d0aaff97
PP
1468}
1469
b2a5cd69
MH
1470/*
1471 * If we have a symbol_name argument, look it up and add the offset field
1472 * to it. This way, we can specify a relative address to a symbol.
bc81d48d
MH
1473 * This returns encoded errors if it fails to look up symbol or invalid
1474 * combination of parameters.
b2a5cd69 1475 */
1d585e70
NR
1476static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1477 const char *symbol_name, unsigned int offset)
b2a5cd69 1478{
1d585e70 1479 if ((symbol_name && addr) || (!symbol_name && !addr))
bc81d48d
MH
1480 goto invalid;
1481
1d585e70 1482 if (symbol_name) {
7246f600 1483 addr = kprobe_lookup_name(symbol_name, offset);
bc81d48d
MH
1484 if (!addr)
1485 return ERR_PTR(-ENOENT);
b2a5cd69
MH
1486 }
1487
1d585e70 1488 addr = (kprobe_opcode_t *)(((char *)addr) + offset);
bc81d48d
MH
1489 if (addr)
1490 return addr;
1491
1492invalid:
1493 return ERR_PTR(-EINVAL);
b2a5cd69
MH
1494}
1495
1d585e70
NR
1496static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1497{
1498 return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1499}
1500
1f0ab409 1501/* Check passed kprobe is valid and return kprobe in kprobe_table. */
55479f64 1502static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1f0ab409 1503{
6d8e40a8 1504 struct kprobe *ap, *list_p;
1f0ab409 1505
7e6a71d8
MH
1506 lockdep_assert_held(&kprobe_mutex);
1507
6d8e40a8
MH
1508 ap = get_kprobe(p->addr);
1509 if (unlikely(!ap))
1f0ab409
AM
1510 return NULL;
1511
6d8e40a8 1512 if (p != ap) {
7e6a71d8 1513 list_for_each_entry(list_p, &ap->list, list)
1f0ab409
AM
1514 if (list_p == p)
1515 /* kprobe p is a valid probe */
1516 goto valid;
1517 return NULL;
1518 }
1519valid:
6d8e40a8 1520 return ap;
1f0ab409
AM
1521}
1522
1523/* Return error if the kprobe is being re-registered */
1524static inline int check_kprobe_rereg(struct kprobe *p)
1525{
1526 int ret = 0;
1f0ab409
AM
1527
1528 mutex_lock(&kprobe_mutex);
6d8e40a8 1529 if (__get_valid_kprobe(p))
1f0ab409
AM
1530 ret = -EINVAL;
1531 mutex_unlock(&kprobe_mutex);
6d8e40a8 1532
1f0ab409
AM
1533 return ret;
1534}
1535
f7f242ff 1536int __weak arch_check_ftrace_location(struct kprobe *p)
1da177e4 1537{
ae6aa16f
MH
1538 unsigned long ftrace_addr;
1539
ae6aa16f
MH
1540 ftrace_addr = ftrace_location((unsigned long)p->addr);
1541 if (ftrace_addr) {
e7dbfe34 1542#ifdef CONFIG_KPROBES_ON_FTRACE
ae6aa16f
MH
1543 /* Given address is not on the instruction boundary */
1544 if ((unsigned long)p->addr != ftrace_addr)
1545 return -EILSEQ;
ae6aa16f 1546 p->flags |= KPROBE_FLAG_FTRACE;
e7dbfe34 1547#else /* !CONFIG_KPROBES_ON_FTRACE */
ae6aa16f
MH
1548 return -EINVAL;
1549#endif
1550 }
f7f242ff
HC
1551 return 0;
1552}
1553
1554static int check_kprobe_address_safe(struct kprobe *p,
1555 struct module **probed_mod)
1556{
1557 int ret;
1f0ab409 1558
f7f242ff
HC
1559 ret = arch_check_ftrace_location(p);
1560 if (ret)
1561 return ret;
91bad2f8 1562 jump_label_lock();
de31c3ca 1563 preempt_disable();
f7fa6ef0
MH
1564
1565 /* Ensure it is not in reserved area nor out of text */
ec30c5f3 1566 if (!kernel_text_address((unsigned long) p->addr) ||
376e2424 1567 within_kprobe_blacklist((unsigned long) p->addr) ||
e336b402
MH
1568 jump_label_text_reserved(p->addr, p->addr) ||
1569 find_bug((unsigned long)p->addr)) {
f986a499 1570 ret = -EINVAL;
f7fa6ef0 1571 goto out;
f986a499 1572 }
b3e55c72 1573
f7fa6ef0
MH
1574 /* Check if are we probing a module */
1575 *probed_mod = __module_text_address((unsigned long) p->addr);
1576 if (*probed_mod) {
6f716acd 1577 /*
e8386a0c
MH
1578 * We must hold a refcount of the probed module while updating
1579 * its code to prohibit unexpected unloading.
df019b1d 1580 */
f7fa6ef0
MH
1581 if (unlikely(!try_module_get(*probed_mod))) {
1582 ret = -ENOENT;
1583 goto out;
1584 }
de31c3ca 1585
f24659d9
MH
1586 /*
1587 * If the module freed .init.text, we couldn't insert
1588 * kprobes in there.
1589 */
f7fa6ef0
MH
1590 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1591 (*probed_mod)->state != MODULE_STATE_COMING) {
1592 module_put(*probed_mod);
1593 *probed_mod = NULL;
1594 ret = -ENOENT;
f24659d9 1595 }
df019b1d 1596 }
f7fa6ef0 1597out:
a189d035 1598 preempt_enable();
de31c3ca 1599 jump_label_unlock();
1da177e4 1600
f7fa6ef0
MH
1601 return ret;
1602}
1603
55479f64 1604int register_kprobe(struct kprobe *p)
f7fa6ef0
MH
1605{
1606 int ret;
1607 struct kprobe *old_p;
1608 struct module *probed_mod;
1609 kprobe_opcode_t *addr;
1610
1611 /* Adjust probe address from symbol */
1612 addr = kprobe_addr(p);
1613 if (IS_ERR(addr))
1614 return PTR_ERR(addr);
1615 p->addr = addr;
1616
1617 ret = check_kprobe_rereg(p);
1618 if (ret)
1619 return ret;
1620
1621 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1622 p->flags &= KPROBE_FLAG_DISABLED;
3516a460 1623 p->nmissed = 0;
9861668f 1624 INIT_LIST_HEAD(&p->list);
afd66255 1625
f7fa6ef0
MH
1626 ret = check_kprobe_address_safe(p, &probed_mod);
1627 if (ret)
1628 return ret;
1629
1630 mutex_lock(&kprobe_mutex);
afd66255 1631
64f562c6
AM
1632 old_p = get_kprobe(p->addr);
1633 if (old_p) {
afd66255 1634 /* Since this may unoptimize old_p, locking text_mutex. */
64f562c6 1635 ret = register_aggr_kprobe(old_p, p);
1da177e4
LT
1636 goto out;
1637 }
1da177e4 1638
2d1e38f5
TG
1639 cpus_read_lock();
1640 /* Prevent text modification */
1641 mutex_lock(&text_mutex);
ae6aa16f 1642 ret = prepare_kprobe(p);
25764288 1643 mutex_unlock(&text_mutex);
2d1e38f5 1644 cpus_read_unlock();
6f716acd 1645 if (ret)
afd66255 1646 goto out;
49a2a1b8 1647
64f562c6 1648 INIT_HLIST_NODE(&p->hlist);
3516a460 1649 hlist_add_head_rcu(&p->hlist,
1da177e4
LT
1650 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1651
12310e34
JY
1652 if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1653 ret = arm_kprobe(p);
1654 if (ret) {
1655 hlist_del_rcu(&p->hlist);
ae8b7ce7 1656 synchronize_rcu();
12310e34
JY
1657 goto out;
1658 }
1659 }
afd66255
MH
1660
1661 /* Try to optimize kprobe */
1662 try_to_optimize_kprobe(p);
1da177e4 1663out:
7a7d1cf9 1664 mutex_unlock(&kprobe_mutex);
49a2a1b8 1665
e8386a0c 1666 if (probed_mod)
df019b1d 1667 module_put(probed_mod);
e8386a0c 1668
1da177e4
LT
1669 return ret;
1670}
99081ab5 1671EXPORT_SYMBOL_GPL(register_kprobe);
1da177e4 1672
6f0f1dd7 1673/* Check if all probes on the aggrprobe are disabled */
55479f64 1674static int aggr_kprobe_disabled(struct kprobe *ap)
6f0f1dd7
MH
1675{
1676 struct kprobe *kp;
1677
7e6a71d8
MH
1678 lockdep_assert_held(&kprobe_mutex);
1679
1680 list_for_each_entry(kp, &ap->list, list)
6f0f1dd7
MH
1681 if (!kprobe_disabled(kp))
1682 /*
1683 * There is an active probe on the list.
1684 * We can't disable this ap.
1685 */
1686 return 0;
1687
1688 return 1;
1689}
1690
1691/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
55479f64 1692static struct kprobe *__disable_kprobe(struct kprobe *p)
6f0f1dd7
MH
1693{
1694 struct kprobe *orig_p;
297f9233 1695 int ret;
6f0f1dd7
MH
1696
1697 /* Get an original kprobe for return */
1698 orig_p = __get_valid_kprobe(p);
1699 if (unlikely(orig_p == NULL))
297f9233 1700 return ERR_PTR(-EINVAL);
6f0f1dd7
MH
1701
1702 if (!kprobe_disabled(p)) {
1703 /* Disable probe if it is a child probe */
1704 if (p != orig_p)
1705 p->flags |= KPROBE_FLAG_DISABLED;
1706
1707 /* Try to disarm and disable this/parent probe */
1708 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
69d54b91
WN
1709 /*
1710 * If kprobes_all_disarmed is set, orig_p
1711 * should have already been disarmed, so
1712 * skip unneed disarming process.
1713 */
297f9233
JY
1714 if (!kprobes_all_disarmed) {
1715 ret = disarm_kprobe(orig_p, true);
1716 if (ret) {
1717 p->flags &= ~KPROBE_FLAG_DISABLED;
1718 return ERR_PTR(ret);
1719 }
1720 }
6f0f1dd7
MH
1721 orig_p->flags |= KPROBE_FLAG_DISABLED;
1722 }
1723 }
1724
1725 return orig_p;
1726}
1727
de5bd88d
MH
1728/*
1729 * Unregister a kprobe without a scheduler synchronization.
1730 */
55479f64 1731static int __unregister_kprobe_top(struct kprobe *p)
de5bd88d 1732{
6d8e40a8 1733 struct kprobe *ap, *list_p;
de5bd88d 1734
6f0f1dd7
MH
1735 /* Disable kprobe. This will disarm it if needed. */
1736 ap = __disable_kprobe(p);
297f9233
JY
1737 if (IS_ERR(ap))
1738 return PTR_ERR(ap);
de5bd88d 1739
6f0f1dd7 1740 if (ap == p)
bf8f6e5b 1741 /*
6f0f1dd7
MH
1742 * This probe is an independent(and non-optimized) kprobe
1743 * (not an aggrprobe). Remove from the hash list.
bf8f6e5b 1744 */
6f0f1dd7
MH
1745 goto disarmed;
1746
1747 /* Following process expects this probe is an aggrprobe */
1748 WARN_ON(!kprobe_aggrprobe(ap));
1749
6274de49
MH
1750 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1751 /*
1752 * !disarmed could be happen if the probe is under delayed
1753 * unoptimizing.
1754 */
6f0f1dd7
MH
1755 goto disarmed;
1756 else {
1757 /* If disabling probe has special handlers, update aggrprobe */
e8386a0c 1758 if (p->post_handler && !kprobe_gone(p)) {
7e6a71d8 1759 list_for_each_entry(list_p, &ap->list, list) {
9861668f
MH
1760 if ((list_p != p) && (list_p->post_handler))
1761 goto noclean;
1762 }
6d8e40a8 1763 ap->post_handler = NULL;
9861668f
MH
1764 }
1765noclean:
6f0f1dd7
MH
1766 /*
1767 * Remove from the aggrprobe: this path will do nothing in
1768 * __unregister_kprobe_bottom().
1769 */
49a2a1b8 1770 list_del_rcu(&p->list);
6f0f1dd7
MH
1771 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1772 /*
1773 * Try to optimize this probe again, because post
1774 * handler may have been changed.
1775 */
1776 optimize_kprobe(ap);
49a2a1b8 1777 }
9861668f 1778 return 0;
6f0f1dd7
MH
1779
1780disarmed:
1781 hlist_del_rcu(&ap->hlist);
1782 return 0;
9861668f 1783}
3516a460 1784
55479f64 1785static void __unregister_kprobe_bottom(struct kprobe *p)
9861668f 1786{
6d8e40a8 1787 struct kprobe *ap;
b3e55c72 1788
e8386a0c 1789 if (list_empty(&p->list))
6274de49 1790 /* This is an independent kprobe */
0498b635 1791 arch_remove_kprobe(p);
e8386a0c 1792 else if (list_is_singular(&p->list)) {
6274de49 1793 /* This is the last child of an aggrprobe */
6d8e40a8 1794 ap = list_entry(p->list.next, struct kprobe, list);
e8386a0c 1795 list_del(&p->list);
6d8e40a8 1796 free_aggr_kprobe(ap);
9861668f 1797 }
6274de49 1798 /* Otherwise, do nothing. */
9861668f
MH
1799}
1800
55479f64 1801int register_kprobes(struct kprobe **kps, int num)
9861668f
MH
1802{
1803 int i, ret = 0;
1804
1805 if (num <= 0)
1806 return -EINVAL;
1807 for (i = 0; i < num; i++) {
49ad2fd7 1808 ret = register_kprobe(kps[i]);
67dddaad
MH
1809 if (ret < 0) {
1810 if (i > 0)
1811 unregister_kprobes(kps, i);
9861668f 1812 break;
36721656 1813 }
49a2a1b8 1814 }
9861668f
MH
1815 return ret;
1816}
99081ab5 1817EXPORT_SYMBOL_GPL(register_kprobes);
9861668f 1818
55479f64 1819void unregister_kprobe(struct kprobe *p)
9861668f
MH
1820{
1821 unregister_kprobes(&p, 1);
1822}
99081ab5 1823EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f 1824
55479f64 1825void unregister_kprobes(struct kprobe **kps, int num)
9861668f
MH
1826{
1827 int i;
1828
1829 if (num <= 0)
1830 return;
1831 mutex_lock(&kprobe_mutex);
1832 for (i = 0; i < num; i++)
1833 if (__unregister_kprobe_top(kps[i]) < 0)
1834 kps[i]->addr = NULL;
1835 mutex_unlock(&kprobe_mutex);
1836
ae8b7ce7 1837 synchronize_rcu();
9861668f
MH
1838 for (i = 0; i < num; i++)
1839 if (kps[i]->addr)
1840 __unregister_kprobe_bottom(kps[i]);
1da177e4 1841}
99081ab5 1842EXPORT_SYMBOL_GPL(unregister_kprobes);
1da177e4 1843
5f6bee34
NR
1844int __weak kprobe_exceptions_notify(struct notifier_block *self,
1845 unsigned long val, void *data)
fc62d020
NR
1846{
1847 return NOTIFY_DONE;
1848}
5f6bee34 1849NOKPROBE_SYMBOL(kprobe_exceptions_notify);
fc62d020 1850
1da177e4 1851static struct notifier_block kprobe_exceptions_nb = {
3d5631e0
AK
1852 .notifier_call = kprobe_exceptions_notify,
1853 .priority = 0x7fffffff /* we need to be notified first */
1854};
1855
3d7e3382
ME
1856unsigned long __weak arch_deref_entry_point(void *entry)
1857{
1858 return (unsigned long)entry;
1859}
1da177e4 1860
9edddaa2 1861#ifdef CONFIG_KRETPROBES
66ada2cc
MH
1862
1863unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1864 void *trampoline_address,
1865 void *frame_pointer)
1866{
66ada2cc 1867 kprobe_opcode_t *correct_ret_addr = NULL;
d741bf41
PZ
1868 struct kretprobe_instance *ri = NULL;
1869 struct llist_node *first, *node;
1870 struct kretprobe *rp;
66ada2cc 1871
d741bf41
PZ
1872 /* Find all nodes for this frame. */
1873 first = node = current->kretprobe_instances.first;
1874 while (node) {
1875 ri = container_of(node, struct kretprobe_instance, llist);
66ada2cc 1876
d741bf41 1877 BUG_ON(ri->fp != frame_pointer);
66ada2cc 1878
d741bf41
PZ
1879 if (ri->ret_addr != trampoline_address) {
1880 correct_ret_addr = ri->ret_addr;
66ada2cc
MH
1881 /*
1882 * This is the real return address. Any other
1883 * instances associated with this task are for
1884 * other calls deeper on the call stack
1885 */
d741bf41
PZ
1886 goto found;
1887 }
1888
1889 node = node->next;
66ada2cc 1890 }
d741bf41
PZ
1891 pr_err("Oops! Kretprobe fails to find correct return address.\n");
1892 BUG_ON(1);
66ada2cc 1893
d741bf41
PZ
1894found:
1895 /* Unlink all nodes for this frame. */
1896 current->kretprobe_instances.first = node->next;
1897 node->next = NULL;
66ada2cc 1898
d741bf41
PZ
1899 /* Run them.. */
1900 while (first) {
1901 ri = container_of(first, struct kretprobe_instance, llist);
1902 first = first->next;
66ada2cc 1903
d741bf41
PZ
1904 rp = get_kretprobe(ri);
1905 if (rp && rp->handler) {
66ada2cc
MH
1906 struct kprobe *prev = kprobe_running();
1907
d741bf41 1908 __this_cpu_write(current_kprobe, &rp->kp);
66ada2cc 1909 ri->ret_addr = correct_ret_addr;
d741bf41 1910 rp->handler(ri, regs);
66ada2cc
MH
1911 __this_cpu_write(current_kprobe, prev);
1912 }
1913
b3388178 1914 recycle_rp_inst(ri);
66ada2cc
MH
1915 }
1916
66ada2cc
MH
1917 return (unsigned long)correct_ret_addr;
1918}
1919NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
1920
e65cefe8
AB
1921/*
1922 * This kprobe pre_handler is registered with every kretprobe. When probe
1923 * hits it will set up the return probe.
1924 */
820aede0 1925static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
e65cefe8
AB
1926{
1927 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
ef53d9c5 1928 struct kretprobe_instance *ri;
6e426e0f 1929 struct freelist_node *fn;
e65cefe8 1930
6e426e0f
PZ
1931 fn = freelist_try_get(&rp->freelist);
1932 if (!fn) {
1933 rp->nmissed++;
1934 return 0;
1935 }
4c4308cb 1936
6e426e0f 1937 ri = container_of(fn, struct kretprobe_instance, freelist);
d741bf41 1938
6e426e0f
PZ
1939 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1940 freelist_add(&ri->freelist, &rp->freelist);
1941 return 0;
ef53d9c5 1942 }
6e426e0f
PZ
1943
1944 arch_prepare_kretprobe(ri, regs);
1945
1946 __llist_add(&ri->llist, &current->kretprobe_instances);
1947
e65cefe8
AB
1948 return 0;
1949}
820aede0 1950NOKPROBE_SYMBOL(pre_handler_kretprobe);
e65cefe8 1951
659b957f 1952bool __weak arch_kprobe_on_func_entry(unsigned long offset)
90ec5e89
NR
1953{
1954 return !offset;
1955}
1956
659b957f 1957bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1d585e70
NR
1958{
1959 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1960
1961 if (IS_ERR(kp_addr))
1962 return false;
1963
1964 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
659b957f 1965 !arch_kprobe_on_func_entry(offset))
1d585e70
NR
1966 return false;
1967
1968 return true;
1969}
1970
55479f64 1971int register_kretprobe(struct kretprobe *rp)
b94cce92
HN
1972{
1973 int ret = 0;
1974 struct kretprobe_instance *inst;
1975 int i;
b2a5cd69 1976 void *addr;
90ec5e89 1977
659b957f 1978 if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
90ec5e89 1979 return -EINVAL;
f438d914
MH
1980
1981 if (kretprobe_blacklist_size) {
b2a5cd69 1982 addr = kprobe_addr(&rp->kp);
bc81d48d
MH
1983 if (IS_ERR(addr))
1984 return PTR_ERR(addr);
f438d914
MH
1985
1986 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1987 if (kretprobe_blacklist[i].addr == addr)
1988 return -EINVAL;
1989 }
1990 }
b94cce92
HN
1991
1992 rp->kp.pre_handler = pre_handler_kretprobe;
7522a842
AM
1993 rp->kp.post_handler = NULL;
1994 rp->kp.fault_handler = NULL;
b94cce92
HN
1995
1996 /* Pre-allocate memory for max kretprobe instances */
1997 if (rp->maxactive <= 0) {
92616606 1998#ifdef CONFIG_PREEMPTION
c2ef6661 1999 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
b94cce92 2000#else
4dae560f 2001 rp->maxactive = num_possible_cpus();
b94cce92
HN
2002#endif
2003 }
6e426e0f 2004 rp->freelist.head = NULL;
d741bf41
PZ
2005 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2006 if (!rp->rph)
2007 return -ENOMEM;
2008
2009 rp->rph->rp = rp;
b94cce92 2010 for (i = 0; i < rp->maxactive; i++) {
d741bf41 2011 inst = kzalloc(sizeof(struct kretprobe_instance) +
f47cd9b5 2012 rp->data_size, GFP_KERNEL);
b94cce92 2013 if (inst == NULL) {
d741bf41 2014 refcount_set(&rp->rph->ref, i);
b94cce92
HN
2015 free_rp_inst(rp);
2016 return -ENOMEM;
2017 }
d741bf41 2018 inst->rph = rp->rph;
6e426e0f 2019 freelist_add(&inst->freelist, &rp->freelist);
b94cce92 2020 }
d741bf41 2021 refcount_set(&rp->rph->ref, i);
b94cce92
HN
2022
2023 rp->nmissed = 0;
2024 /* Establish function entry probe point */
49ad2fd7 2025 ret = register_kprobe(&rp->kp);
4a296e07 2026 if (ret != 0)
b94cce92
HN
2027 free_rp_inst(rp);
2028 return ret;
2029}
99081ab5 2030EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce92 2031
55479f64 2032int register_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2033{
2034 int ret = 0, i;
2035
2036 if (num <= 0)
2037 return -EINVAL;
2038 for (i = 0; i < num; i++) {
49ad2fd7 2039 ret = register_kretprobe(rps[i]);
67dddaad
MH
2040 if (ret < 0) {
2041 if (i > 0)
2042 unregister_kretprobes(rps, i);
4a296e07
MH
2043 break;
2044 }
2045 }
2046 return ret;
2047}
99081ab5 2048EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07 2049
55479f64 2050void unregister_kretprobe(struct kretprobe *rp)
4a296e07
MH
2051{
2052 unregister_kretprobes(&rp, 1);
2053}
99081ab5 2054EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07 2055
55479f64 2056void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2057{
2058 int i;
2059
2060 if (num <= 0)
2061 return;
2062 mutex_lock(&kprobe_mutex);
d741bf41 2063 for (i = 0; i < num; i++) {
4a296e07
MH
2064 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2065 rps[i]->kp.addr = NULL;
d741bf41
PZ
2066 rps[i]->rph->rp = NULL;
2067 }
4a296e07
MH
2068 mutex_unlock(&kprobe_mutex);
2069
ae8b7ce7 2070 synchronize_rcu();
4a296e07
MH
2071 for (i = 0; i < num; i++) {
2072 if (rps[i]->kp.addr) {
2073 __unregister_kprobe_bottom(&rps[i]->kp);
d741bf41 2074 free_rp_inst(rps[i]);
4a296e07
MH
2075 }
2076 }
2077}
99081ab5 2078EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07 2079
9edddaa2 2080#else /* CONFIG_KRETPROBES */
55479f64 2081int register_kretprobe(struct kretprobe *rp)
b94cce92
HN
2082{
2083 return -ENOSYS;
2084}
99081ab5 2085EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce92 2086
55479f64 2087int register_kretprobes(struct kretprobe **rps, int num)
346fd59b 2088{
4a296e07 2089 return -ENOSYS;
346fd59b 2090}
99081ab5
MH
2091EXPORT_SYMBOL_GPL(register_kretprobes);
2092
55479f64 2093void unregister_kretprobe(struct kretprobe *rp)
b94cce92 2094{
4a296e07 2095}
99081ab5 2096EXPORT_SYMBOL_GPL(unregister_kretprobe);
b94cce92 2097
55479f64 2098void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2099{
2100}
99081ab5 2101EXPORT_SYMBOL_GPL(unregister_kretprobes);
4c4308cb 2102
820aede0 2103static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
4a296e07
MH
2104{
2105 return 0;
b94cce92 2106}
820aede0 2107NOKPROBE_SYMBOL(pre_handler_kretprobe);
b94cce92 2108
4a296e07
MH
2109#endif /* CONFIG_KRETPROBES */
2110
e8386a0c 2111/* Set the kprobe gone and remove its instruction buffer. */
55479f64 2112static void kill_kprobe(struct kprobe *p)
e8386a0c
MH
2113{
2114 struct kprobe *kp;
de5bd88d 2115
7e6a71d8
MH
2116 lockdep_assert_held(&kprobe_mutex);
2117
e8386a0c 2118 p->flags |= KPROBE_FLAG_GONE;
afd66255 2119 if (kprobe_aggrprobe(p)) {
e8386a0c
MH
2120 /*
2121 * If this is an aggr_kprobe, we have to list all the
2122 * chained probes and mark them GONE.
2123 */
7e6a71d8 2124 list_for_each_entry(kp, &p->list, list)
e8386a0c
MH
2125 kp->flags |= KPROBE_FLAG_GONE;
2126 p->post_handler = NULL;
afd66255 2127 kill_optimized_kprobe(p);
e8386a0c
MH
2128 }
2129 /*
2130 * Here, we can remove insn_slot safely, because no thread calls
2131 * the original probed function (which will be freed soon) any more.
2132 */
2133 arch_remove_kprobe(p);
0cb2f137
MS
2134
2135 /*
2136 * The module is going away. We should disarm the kprobe which
bcb53209
MH
2137 * is using ftrace, because ftrace framework is still available at
2138 * MODULE_STATE_GOING notification.
0cb2f137 2139 */
bcb53209 2140 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
0cb2f137 2141 disarm_kprobe_ftrace(p);
e8386a0c
MH
2142}
2143
c0614829 2144/* Disable one kprobe */
55479f64 2145int disable_kprobe(struct kprobe *kp)
c0614829
MH
2146{
2147 int ret = 0;
297f9233 2148 struct kprobe *p;
c0614829
MH
2149
2150 mutex_lock(&kprobe_mutex);
2151
6f0f1dd7 2152 /* Disable this kprobe */
297f9233
JY
2153 p = __disable_kprobe(kp);
2154 if (IS_ERR(p))
2155 ret = PTR_ERR(p);
c0614829 2156
c0614829
MH
2157 mutex_unlock(&kprobe_mutex);
2158 return ret;
2159}
2160EXPORT_SYMBOL_GPL(disable_kprobe);
2161
2162/* Enable one kprobe */
55479f64 2163int enable_kprobe(struct kprobe *kp)
c0614829
MH
2164{
2165 int ret = 0;
2166 struct kprobe *p;
2167
2168 mutex_lock(&kprobe_mutex);
2169
2170 /* Check whether specified probe is valid. */
2171 p = __get_valid_kprobe(kp);
2172 if (unlikely(p == NULL)) {
2173 ret = -EINVAL;
2174 goto out;
2175 }
2176
2177 if (kprobe_gone(kp)) {
2178 /* This kprobe has gone, we couldn't enable it. */
2179 ret = -EINVAL;
2180 goto out;
2181 }
2182
2183 if (p != kp)
2184 kp->flags &= ~KPROBE_FLAG_DISABLED;
2185
2186 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2187 p->flags &= ~KPROBE_FLAG_DISABLED;
12310e34
JY
2188 ret = arm_kprobe(p);
2189 if (ret)
2190 p->flags |= KPROBE_FLAG_DISABLED;
c0614829
MH
2191 }
2192out:
2193 mutex_unlock(&kprobe_mutex);
2194 return ret;
2195}
2196EXPORT_SYMBOL_GPL(enable_kprobe);
2197
4458515b 2198/* Caller must NOT call this in usual path. This is only for critical case */
820aede0 2199void dump_kprobe(struct kprobe *kp)
24851d24 2200{
4458515b
MH
2201 pr_err("Dumping kprobe:\n");
2202 pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2203 kp->symbol_name, kp->offset, kp->addr);
24851d24 2204}
820aede0 2205NOKPROBE_SYMBOL(dump_kprobe);
24851d24 2206
fb1a59fa
MH
2207int kprobe_add_ksym_blacklist(unsigned long entry)
2208{
2209 struct kprobe_blacklist_entry *ent;
2210 unsigned long offset = 0, size = 0;
2211
2212 if (!kernel_text_address(entry) ||
2213 !kallsyms_lookup_size_offset(entry, &size, &offset))
2214 return -EINVAL;
2215
2216 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2217 if (!ent)
2218 return -ENOMEM;
2219 ent->start_addr = entry;
2220 ent->end_addr = entry + size;
2221 INIT_LIST_HEAD(&ent->list);
2222 list_add_tail(&ent->list, &kprobe_blacklist);
2223
2224 return (int)size;
2225}
2226
2227/* Add all symbols in given area into kprobe blacklist */
2228int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2229{
2230 unsigned long entry;
2231 int ret = 0;
2232
2233 for (entry = start; entry < end; entry += ret) {
2234 ret = kprobe_add_ksym_blacklist(entry);
2235 if (ret < 0)
2236 return ret;
2237 if (ret == 0) /* In case of alias symbol */
2238 ret = 1;
2239 }
2240 return 0;
2241}
2242
1e6769b0
MH
2243/* Remove all symbols in given area from kprobe blacklist */
2244static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2245{
2246 struct kprobe_blacklist_entry *ent, *n;
2247
2248 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2249 if (ent->start_addr < start || ent->start_addr >= end)
2250 continue;
2251 list_del(&ent->list);
2252 kfree(ent);
2253 }
2254}
2255
16db6264
MH
2256static void kprobe_remove_ksym_blacklist(unsigned long entry)
2257{
2258 kprobe_remove_area_blacklist(entry, entry + 1);
2259}
2260
d002b8bc
AH
2261int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2262 char *type, char *sym)
2263{
2264 return -ERANGE;
2265}
2266
2267int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2268 char *sym)
2269{
2270#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2271 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2272 return 0;
2273#ifdef CONFIG_OPTPROBES
2274 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2275 return 0;
2276#endif
2277#endif
2278 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2279 return 0;
2280 return -ERANGE;
2281}
2282
fb1a59fa
MH
2283int __init __weak arch_populate_kprobe_blacklist(void)
2284{
2285 return 0;
2286}
2287
376e2424
MH
2288/*
2289 * Lookup and populate the kprobe_blacklist.
2290 *
2291 * Unlike the kretprobe blacklist, we'll need to determine
2292 * the range of addresses that belong to the said functions,
2293 * since a kprobe need not necessarily be at the beginning
2294 * of a function.
2295 */
2296static int __init populate_kprobe_blacklist(unsigned long *start,
2297 unsigned long *end)
2298{
fb1a59fa 2299 unsigned long entry;
376e2424 2300 unsigned long *iter;
fb1a59fa 2301 int ret;
376e2424
MH
2302
2303 for (iter = start; iter < end; iter++) {
d81b4253 2304 entry = arch_deref_entry_point((void *)*iter);
fb1a59fa
MH
2305 ret = kprobe_add_ksym_blacklist(entry);
2306 if (ret == -EINVAL)
376e2424 2307 continue;
fb1a59fa
MH
2308 if (ret < 0)
2309 return ret;
376e2424 2310 }
fb1a59fa
MH
2311
2312 /* Symbols in __kprobes_text are blacklisted */
2313 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2314 (unsigned long)__kprobes_text_end);
66e9b071
TG
2315 if (ret)
2316 return ret;
2317
2318 /* Symbols in noinstr section are blacklisted */
2319 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2320 (unsigned long)__noinstr_text_end);
fb1a59fa
MH
2321
2322 return ret ? : arch_populate_kprobe_blacklist();
376e2424
MH
2323}
2324
1e6769b0
MH
2325static void add_module_kprobe_blacklist(struct module *mod)
2326{
2327 unsigned long start, end;
16db6264
MH
2328 int i;
2329
2330 if (mod->kprobe_blacklist) {
2331 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2332 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2333 }
1e6769b0
MH
2334
2335 start = (unsigned long)mod->kprobes_text_start;
2336 if (start) {
2337 end = start + mod->kprobes_text_size;
2338 kprobe_add_area_blacklist(start, end);
2339 }
66e9b071
TG
2340
2341 start = (unsigned long)mod->noinstr_text_start;
2342 if (start) {
2343 end = start + mod->noinstr_text_size;
2344 kprobe_add_area_blacklist(start, end);
2345 }
1e6769b0
MH
2346}
2347
2348static void remove_module_kprobe_blacklist(struct module *mod)
2349{
2350 unsigned long start, end;
16db6264
MH
2351 int i;
2352
2353 if (mod->kprobe_blacklist) {
2354 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2355 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2356 }
1e6769b0
MH
2357
2358 start = (unsigned long)mod->kprobes_text_start;
2359 if (start) {
2360 end = start + mod->kprobes_text_size;
2361 kprobe_remove_area_blacklist(start, end);
2362 }
66e9b071
TG
2363
2364 start = (unsigned long)mod->noinstr_text_start;
2365 if (start) {
2366 end = start + mod->noinstr_text_size;
2367 kprobe_remove_area_blacklist(start, end);
2368 }
1e6769b0
MH
2369}
2370
e8386a0c 2371/* Module notifier call back, checking kprobes on the module */
55479f64
MH
2372static int kprobes_module_callback(struct notifier_block *nb,
2373 unsigned long val, void *data)
e8386a0c
MH
2374{
2375 struct module *mod = data;
2376 struct hlist_head *head;
e8386a0c
MH
2377 struct kprobe *p;
2378 unsigned int i;
f24659d9 2379 int checkcore = (val == MODULE_STATE_GOING);
e8386a0c 2380
1e6769b0
MH
2381 if (val == MODULE_STATE_COMING) {
2382 mutex_lock(&kprobe_mutex);
2383 add_module_kprobe_blacklist(mod);
2384 mutex_unlock(&kprobe_mutex);
2385 }
f24659d9 2386 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
e8386a0c
MH
2387 return NOTIFY_DONE;
2388
2389 /*
f24659d9
MH
2390 * When MODULE_STATE_GOING was notified, both of module .text and
2391 * .init.text sections would be freed. When MODULE_STATE_LIVE was
2392 * notified, only .init.text section would be freed. We need to
2393 * disable kprobes which have been inserted in the sections.
e8386a0c
MH
2394 */
2395 mutex_lock(&kprobe_mutex);
2396 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2397 head = &kprobe_table[i];
7e6a71d8 2398 hlist_for_each_entry(p, head, hlist)
f24659d9
MH
2399 if (within_module_init((unsigned long)p->addr, mod) ||
2400 (checkcore &&
2401 within_module_core((unsigned long)p->addr, mod))) {
e8386a0c
MH
2402 /*
2403 * The vaddr this probe is installed will soon
2404 * be vfreed buy not synced to disk. Hence,
2405 * disarming the breakpoint isn't needed.
545a0281
SRV
2406 *
2407 * Note, this will also move any optimized probes
2408 * that are pending to be removed from their
2409 * corresponding lists to the freeing_list and
2410 * will not be touched by the delayed
2411 * kprobe_optimizer work handler.
e8386a0c
MH
2412 */
2413 kill_kprobe(p);
2414 }
2415 }
1e6769b0
MH
2416 if (val == MODULE_STATE_GOING)
2417 remove_module_kprobe_blacklist(mod);
e8386a0c
MH
2418 mutex_unlock(&kprobe_mutex);
2419 return NOTIFY_DONE;
2420}
2421
2422static struct notifier_block kprobe_module_nb = {
2423 .notifier_call = kprobes_module_callback,
2424 .priority = 0
2425};
2426
376e2424
MH
2427/* Markers of _kprobe_blacklist section */
2428extern unsigned long __start_kprobe_blacklist[];
2429extern unsigned long __stop_kprobe_blacklist[];
2430
82d083ab
MH
2431void kprobe_free_init_mem(void)
2432{
2433 void *start = (void *)(&__init_begin);
2434 void *end = (void *)(&__init_end);
2435 struct hlist_head *head;
2436 struct kprobe *p;
2437 int i;
2438
2439 mutex_lock(&kprobe_mutex);
2440
2441 /* Kill all kprobes on initmem */
2442 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2443 head = &kprobe_table[i];
2444 hlist_for_each_entry(p, head, hlist) {
2445 if (start <= (void *)p->addr && (void *)p->addr < end)
2446 kill_kprobe(p);
2447 }
2448 }
2449
2450 mutex_unlock(&kprobe_mutex);
2451}
2452
1da177e4
LT
2453static int __init init_kprobes(void)
2454{
2455 int i, err = 0;
2456
2457 /* FIXME allocate the probe table, currently defined statically */
2458 /* initialize all list heads */
d741bf41 2459 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1da177e4
LT
2460 INIT_HLIST_HEAD(&kprobe_table[i]);
2461
376e2424
MH
2462 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2463 __stop_kprobe_blacklist);
2464 if (err) {
2465 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2466 pr_err("Please take care of using kprobes.\n");
3d8d996e
SD
2467 }
2468
f438d914
MH
2469 if (kretprobe_blacklist_size) {
2470 /* lookup the function address from its name */
2471 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
49e0b465 2472 kretprobe_blacklist[i].addr =
290e3070 2473 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
f438d914
MH
2474 if (!kretprobe_blacklist[i].addr)
2475 printk("kretprobe: lookup failed: %s\n",
2476 kretprobe_blacklist[i].name);
2477 }
2478 }
2479
b2be84df
MH
2480#if defined(CONFIG_OPTPROBES)
2481#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
afd66255
MH
2482 /* Init kprobe_optinsn_slots */
2483 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2484#endif
b2be84df
MH
2485 /* By default, kprobes can be optimized */
2486 kprobes_allow_optimization = true;
2487#endif
afd66255 2488
e579abeb
MH
2489 /* By default, kprobes are armed */
2490 kprobes_all_disarmed = false;
bf8f6e5b 2491
6772926b 2492 err = arch_init_kprobes();
802eae7c
RL
2493 if (!err)
2494 err = register_die_notifier(&kprobe_exceptions_nb);
e8386a0c
MH
2495 if (!err)
2496 err = register_module_notifier(&kprobe_module_nb);
2497
ef53d9c5 2498 kprobes_initialized = (err == 0);
802eae7c 2499
8c1c9356
AM
2500 if (!err)
2501 init_test_probes();
1da177e4
LT
2502 return err;
2503}
36dadef2 2504early_initcall(init_kprobes);
1da177e4 2505
346fd59b 2506#ifdef CONFIG_DEBUG_FS
55479f64 2507static void report_probe(struct seq_file *pi, struct kprobe *p,
afd66255 2508 const char *sym, int offset, char *modname, struct kprobe *pp)
346fd59b
SD
2509{
2510 char *kprobe_type;
81365a94 2511 void *addr = p->addr;
346fd59b
SD
2512
2513 if (p->pre_handler == pre_handler_kretprobe)
2514 kprobe_type = "r";
346fd59b
SD
2515 else
2516 kprobe_type = "k";
afd66255 2517
60f7bb66 2518 if (!kallsyms_show_value(pi->file->f_cred))
81365a94
MH
2519 addr = NULL;
2520
346fd59b 2521 if (sym)
81365a94
MH
2522 seq_printf(pi, "%px %s %s+0x%x %s ",
2523 addr, kprobe_type, sym, offset,
afd66255 2524 (modname ? modname : " "));
81365a94
MH
2525 else /* try to use %pS */
2526 seq_printf(pi, "%px %s %pS ",
2527 addr, kprobe_type, p->addr);
afd66255
MH
2528
2529 if (!pp)
2530 pp = p;
ae6aa16f 2531 seq_printf(pi, "%s%s%s%s\n",
afd66255
MH
2532 (kprobe_gone(p) ? "[GONE]" : ""),
2533 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
ae6aa16f
MH
2534 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2535 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
346fd59b
SD
2536}
2537
55479f64 2538static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
346fd59b
SD
2539{
2540 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2541}
2542
55479f64 2543static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
346fd59b
SD
2544{
2545 (*pos)++;
2546 if (*pos >= KPROBE_TABLE_SIZE)
2547 return NULL;
2548 return pos;
2549}
2550
55479f64 2551static void kprobe_seq_stop(struct seq_file *f, void *v)
346fd59b
SD
2552{
2553 /* Nothing to do */
2554}
2555
55479f64 2556static int show_kprobe_addr(struct seq_file *pi, void *v)
346fd59b
SD
2557{
2558 struct hlist_head *head;
346fd59b
SD
2559 struct kprobe *p, *kp;
2560 const char *sym = NULL;
2561 unsigned int i = *(loff_t *) v;
ffb45122 2562 unsigned long offset = 0;
ab767865 2563 char *modname, namebuf[KSYM_NAME_LEN];
346fd59b
SD
2564
2565 head = &kprobe_table[i];
2566 preempt_disable();
b67bfe0d 2567 hlist_for_each_entry_rcu(p, head, hlist) {
ffb45122 2568 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
346fd59b 2569 &offset, &modname, namebuf);
afd66255 2570 if (kprobe_aggrprobe(p)) {
346fd59b 2571 list_for_each_entry_rcu(kp, &p->list, list)
afd66255 2572 report_probe(pi, kp, sym, offset, modname, p);
346fd59b 2573 } else
afd66255 2574 report_probe(pi, p, sym, offset, modname, NULL);
346fd59b
SD
2575 }
2576 preempt_enable();
2577 return 0;
2578}
2579
eac2cece 2580static const struct seq_operations kprobes_sops = {
346fd59b
SD
2581 .start = kprobe_seq_start,
2582 .next = kprobe_seq_next,
2583 .stop = kprobe_seq_stop,
2584 .show = show_kprobe_addr
2585};
2586
eac2cece 2587DEFINE_SEQ_ATTRIBUTE(kprobes);
346fd59b 2588
63724740
MH
2589/* kprobes/blacklist -- shows which functions can not be probed */
2590static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2591{
4fdd8887 2592 mutex_lock(&kprobe_mutex);
63724740
MH
2593 return seq_list_start(&kprobe_blacklist, *pos);
2594}
2595
2596static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2597{
2598 return seq_list_next(v, &kprobe_blacklist, pos);
2599}
2600
2601static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2602{
2603 struct kprobe_blacklist_entry *ent =
2604 list_entry(v, struct kprobe_blacklist_entry, list);
2605
ffb9bd68
MH
2606 /*
2607 * If /proc/kallsyms is not showing kernel address, we won't
2608 * show them here either.
2609 */
60f7bb66 2610 if (!kallsyms_show_value(m->file->f_cred))
ffb9bd68
MH
2611 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2612 (void *)ent->start_addr);
2613 else
2614 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2615 (void *)ent->end_addr, (void *)ent->start_addr);
63724740
MH
2616 return 0;
2617}
2618
4fdd8887
MH
2619static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2620{
2621 mutex_unlock(&kprobe_mutex);
2622}
2623
eac2cece 2624static const struct seq_operations kprobe_blacklist_sops = {
63724740
MH
2625 .start = kprobe_blacklist_seq_start,
2626 .next = kprobe_blacklist_seq_next,
4fdd8887 2627 .stop = kprobe_blacklist_seq_stop,
63724740
MH
2628 .show = kprobe_blacklist_seq_show,
2629};
eac2cece 2630DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
63724740 2631
12310e34 2632static int arm_all_kprobes(void)
bf8f6e5b
AM
2633{
2634 struct hlist_head *head;
bf8f6e5b 2635 struct kprobe *p;
12310e34
JY
2636 unsigned int i, total = 0, errors = 0;
2637 int err, ret = 0;
bf8f6e5b
AM
2638
2639 mutex_lock(&kprobe_mutex);
2640
e579abeb
MH
2641 /* If kprobes are armed, just return */
2642 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2643 goto already_enabled;
2644
977ad481
WN
2645 /*
2646 * optimize_kprobe() called by arm_kprobe() checks
2647 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2648 * arm_kprobe.
2649 */
2650 kprobes_all_disarmed = false;
afd66255 2651 /* Arming kprobes doesn't optimize kprobe itself */
bf8f6e5b
AM
2652 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2653 head = &kprobe_table[i];
12310e34 2654 /* Arm all kprobes on a best-effort basis */
7e6a71d8 2655 hlist_for_each_entry(p, head, hlist) {
12310e34
JY
2656 if (!kprobe_disabled(p)) {
2657 err = arm_kprobe(p);
2658 if (err) {
2659 errors++;
2660 ret = err;
2661 }
2662 total++;
2663 }
2664 }
bf8f6e5b
AM
2665 }
2666
12310e34
JY
2667 if (errors)
2668 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2669 errors, total);
2670 else
2671 pr_info("Kprobes globally enabled\n");
bf8f6e5b
AM
2672
2673already_enabled:
2674 mutex_unlock(&kprobe_mutex);
12310e34 2675 return ret;
bf8f6e5b
AM
2676}
2677
297f9233 2678static int disarm_all_kprobes(void)
bf8f6e5b
AM
2679{
2680 struct hlist_head *head;
bf8f6e5b 2681 struct kprobe *p;
297f9233
JY
2682 unsigned int i, total = 0, errors = 0;
2683 int err, ret = 0;
bf8f6e5b
AM
2684
2685 mutex_lock(&kprobe_mutex);
2686
e579abeb 2687 /* If kprobes are already disarmed, just return */
6274de49
MH
2688 if (kprobes_all_disarmed) {
2689 mutex_unlock(&kprobe_mutex);
297f9233 2690 return 0;
6274de49 2691 }
bf8f6e5b 2692
e579abeb 2693 kprobes_all_disarmed = true;
afd66255 2694
bf8f6e5b
AM
2695 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2696 head = &kprobe_table[i];
297f9233 2697 /* Disarm all kprobes on a best-effort basis */
7e6a71d8 2698 hlist_for_each_entry(p, head, hlist) {
297f9233
JY
2699 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2700 err = disarm_kprobe(p, false);
2701 if (err) {
2702 errors++;
2703 ret = err;
2704 }
2705 total++;
2706 }
bf8f6e5b
AM
2707 }
2708 }
297f9233
JY
2709
2710 if (errors)
2711 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2712 errors, total);
2713 else
2714 pr_info("Kprobes globally disabled\n");
2715
bf8f6e5b 2716 mutex_unlock(&kprobe_mutex);
bf8f6e5b 2717
6274de49
MH
2718 /* Wait for disarming all kprobes by optimizer */
2719 wait_for_kprobe_optimizer();
297f9233
JY
2720
2721 return ret;
bf8f6e5b
AM
2722}
2723
2724/*
2725 * XXX: The debugfs bool file interface doesn't allow for callbacks
2726 * when the bool state is switched. We can reuse that facility when
2727 * available
2728 */
2729static ssize_t read_enabled_file_bool(struct file *file,
2730 char __user *user_buf, size_t count, loff_t *ppos)
2731{
2732 char buf[3];
2733
e579abeb 2734 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2735 buf[0] = '1';
2736 else
2737 buf[0] = '0';
2738 buf[1] = '\n';
2739 buf[2] = 0x00;
2740 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2741}
2742
2743static ssize_t write_enabled_file_bool(struct file *file,
2744 const char __user *user_buf, size_t count, loff_t *ppos)
2745{
2746 char buf[32];
efeb156e 2747 size_t buf_size;
12310e34 2748 int ret = 0;
bf8f6e5b
AM
2749
2750 buf_size = min(count, (sizeof(buf)-1));
2751 if (copy_from_user(buf, user_buf, buf_size))
2752 return -EFAULT;
2753
10fb46d5 2754 buf[buf_size] = '\0';
bf8f6e5b
AM
2755 switch (buf[0]) {
2756 case 'y':
2757 case 'Y':
2758 case '1':
12310e34 2759 ret = arm_all_kprobes();
bf8f6e5b
AM
2760 break;
2761 case 'n':
2762 case 'N':
2763 case '0':
297f9233 2764 ret = disarm_all_kprobes();
bf8f6e5b 2765 break;
10fb46d5
MK
2766 default:
2767 return -EINVAL;
bf8f6e5b
AM
2768 }
2769
12310e34
JY
2770 if (ret)
2771 return ret;
2772
bf8f6e5b
AM
2773 return count;
2774}
2775
828c0950 2776static const struct file_operations fops_kp = {
bf8f6e5b
AM
2777 .read = read_enabled_file_bool,
2778 .write = write_enabled_file_bool,
6038f373 2779 .llseek = default_llseek,
bf8f6e5b
AM
2780};
2781
55479f64 2782static int __init debugfs_kprobe_init(void)
346fd59b 2783{
8c0fd1fa 2784 struct dentry *dir;
bf8f6e5b 2785 unsigned int value = 1;
346fd59b
SD
2786
2787 dir = debugfs_create_dir("kprobes", NULL);
346fd59b 2788
eac2cece 2789 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
346fd59b 2790
8c0fd1fa 2791 debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);
63724740 2792
8c0fd1fa 2793 debugfs_create_file("blacklist", 0400, dir, NULL,
eac2cece 2794 &kprobe_blacklist_fops);
bf8f6e5b 2795
346fd59b
SD
2796 return 0;
2797}
2798
2799late_initcall(debugfs_kprobe_init);
2800#endif /* CONFIG_DEBUG_FS */