1 /*
2 * uprobes-based tracing events
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) IBM Corporation, 2010-2012
18 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
19 */
20 #define pr_fmt(fmt) "trace_uprobe: " fmt
21
22 #include <linux/module.h>
23 #include <linux/uaccess.h>
24 #include <linux/uprobes.h>
25 #include <linux/namei.h>
26 #include <linux/string.h>
27 #include <linux/rculist.h>
28
29 #include "trace_probe.h"
30
31 #define UPROBE_EVENT_SYSTEM "uprobes"
32
33 struct uprobe_trace_entry_head {
34 struct trace_entry ent;
35 unsigned long vaddr[];
36 };
37
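/*
 * Entry layout (descriptive note): a regular probe records one address, the
 * probed instruction pointer, in vaddr[0]; a return probe records two, the
 * probed function entry in vaddr[0] and the return address in vaddr[1],
 * hence the "is_return ? 2 : 1" below.  Fetched argument data is appended
 * right after the vaddr[] slots, which is where DATAOF_TRACE_ENTRY() points.
 */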
38 #define SIZEOF_TRACE_ENTRY(is_return) \
39 (sizeof(struct uprobe_trace_entry_head) + \
40 sizeof(unsigned long) * (is_return ? 2 : 1))
41
42 #define DATAOF_TRACE_ENTRY(entry, is_return) \
43 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
44
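/*
 * Per-probe perf filter state (descriptive note): perf_events lists the
 * task-bound perf events attached to this probe, and nr_systemwide counts
 * attached events that carry no task filter.  __uprobe_perf_filter() checks
 * a target mm against this state.
 */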
45 struct trace_uprobe_filter {
46 rwlock_t rwlock;
47 int nr_systemwide;
48 struct list_head perf_events;
49 };
50
51 /*
52 * uprobe event core functions
53 */
54 struct trace_uprobe {
55 struct list_head list;
56 struct trace_uprobe_filter filter;
57 struct uprobe_consumer consumer;
58 struct inode *inode;
59 char *filename;
60 unsigned long offset;
61 unsigned long nhit;
62 struct trace_probe tp;
63 };
64
65 #define SIZEOF_TRACE_UPROBE(n) \
66 (offsetof(struct trace_uprobe, tp.args) + \
67 (sizeof(struct probe_arg) * (n)))
68
69 static int register_uprobe_event(struct trace_uprobe *tu);
70 static int unregister_uprobe_event(struct trace_uprobe *tu);
71
72 static DEFINE_MUTEX(uprobe_lock);
73 static LIST_HEAD(uprobe_list);
74
75 struct uprobe_dispatch_data {
76 struct trace_uprobe *tu;
77 unsigned long bp_addr;
78 };
79
80 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
81 static int uretprobe_dispatcher(struct uprobe_consumer *con,
82 unsigned long func, struct pt_regs *regs);
83
84 #ifdef CONFIG_STACK_GROWSUP
85 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
86 {
87 return addr - (n * sizeof(long));
88 }
89 #else
90 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
91 {
92 return addr + (n * sizeof(long));
93 }
94 #endif
95
96 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
97 {
98 unsigned long ret;
99 unsigned long addr = user_stack_pointer(regs);
100
101 addr = adjust_stack_addr(addr, n);
102
103 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
104 return 0;
105
106 return ret;
107 }
108
109 /*
110 * Uprobes-specific fetch functions
111 */
112 #define DEFINE_FETCH_stack(type) \
113 static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
114 void *offset, void *dest) \
115 { \
116 *(type *)dest = (type)get_user_stack_nth(regs, \
117 ((unsigned long)offset)); \
118 }
119 DEFINE_BASIC_FETCH_FUNCS(stack)
120 /* No string on the stack entry */
121 #define fetch_stack_string NULL
122 #define fetch_stack_string_size NULL
123
124 #define DEFINE_FETCH_memory(type) \
125 static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
126 void *addr, void *dest) \
127 { \
128 type retval; \
129 void __user *vaddr = (void __force __user *) addr; \
130 \
131 if (copy_from_user(&retval, vaddr, sizeof(type))) \
132 *(type *)dest = 0; \
133 else \
134 *(type *) dest = retval; \
135 }
136 DEFINE_BASIC_FETCH_FUNCS(memory)
137 /*
138 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
139 * length and relative data location.
140 */
141 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
142 void *addr, void *dest)
143 {
144 long ret;
145 u32 rloc = *(u32 *)dest;
146 int maxlen = get_rloc_len(rloc);
147 u8 *dst = get_rloc_data(dest);
148 void __user *src = (void __force __user *) addr;
149
150 if (!maxlen)
151 return;
152
153 ret = strncpy_from_user(dst, src, maxlen);
154 if (ret == maxlen)
155 dst[--ret] = '\0';
156
157 if (ret < 0) { /* Failed to fetch string */
158 ((u8 *)get_rloc_data(dest))[0] = '\0';
159 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
160 } else {
161 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
162 }
163 }
164
165 static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
166 void *addr, void *dest)
167 {
168 int len;
169 void __user *vaddr = (void __force __user *) addr;
170
171 len = strnlen_user(vaddr, MAX_STRING_SIZE);
172
173 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
174 *(u32 *)dest = 0;
175 else
176 *(u32 *)dest = len;
177 }
178
179 static unsigned long translate_user_vaddr(void *file_offset)
180 {
181 unsigned long base_addr;
182 struct uprobe_dispatch_data *udd;
183
184 udd = (void *) current->utask->vaddr;
185
186 base_addr = udd->bp_addr - udd->tu->offset;
187 return base_addr + (unsigned long)file_offset;
188 }
189
190 #define DEFINE_FETCH_file_offset(type) \
191 static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
192 void *offset, void *dest)\
193 { \
194 void *vaddr = (void *)translate_user_vaddr(offset); \
195 \
196 FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
197 }
198 DEFINE_BASIC_FETCH_FUNCS(file_offset)
199 DEFINE_FETCH_file_offset(string)
200 DEFINE_FETCH_file_offset(string_size)
201
202 /* Fetch type information table */
203 static const struct fetch_type uprobes_fetch_type_table[] = {
204 /* Special types */
205 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
206 sizeof(u32), 1, "__data_loc char[]"),
207 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
208 string_size, sizeof(u32), 0, "u32"),
209 /* Basic types */
210 ASSIGN_FETCH_TYPE(u8, u8, 0),
211 ASSIGN_FETCH_TYPE(u16, u16, 0),
212 ASSIGN_FETCH_TYPE(u32, u32, 0),
213 ASSIGN_FETCH_TYPE(u64, u64, 0),
214 ASSIGN_FETCH_TYPE(s8, u8, 1),
215 ASSIGN_FETCH_TYPE(s16, u16, 1),
216 ASSIGN_FETCH_TYPE(s32, u32, 1),
217 ASSIGN_FETCH_TYPE(s64, u64, 1),
218 ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
219 ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
220 ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
221 ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
222
223 ASSIGN_FETCH_TYPE_END
224 };
225
226 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
227 {
228 rwlock_init(&filter->rwlock);
229 filter->nr_systemwide = 0;
230 INIT_LIST_HEAD(&filter->perf_events);
231 }
232
233 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
234 {
235 return !filter->nr_systemwide && list_empty(&filter->perf_events);
236 }
237
238 static inline bool is_ret_probe(struct trace_uprobe *tu)
239 {
240 return tu->consumer.ret_handler != NULL;
241 }
242
243 /*
244 * Allocate new trace_uprobe and initialize it (including uprobes).
245 */
246 static struct trace_uprobe *
247 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
248 {
249 struct trace_uprobe *tu;
250
251 if (!event || !is_good_name(event))
252 return ERR_PTR(-EINVAL);
253
254 if (!group || !is_good_name(group))
255 return ERR_PTR(-EINVAL);
256
257 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
258 if (!tu)
259 return ERR_PTR(-ENOMEM);
260
261 tu->tp.call.class = &tu->tp.class;
262 tu->tp.call.name = kstrdup(event, GFP_KERNEL);
263 if (!tu->tp.call.name)
264 goto error;
265
266 tu->tp.class.system = kstrdup(group, GFP_KERNEL);
267 if (!tu->tp.class.system)
268 goto error;
269
270 INIT_LIST_HEAD(&tu->list);
271 INIT_LIST_HEAD(&tu->tp.files);
272 tu->consumer.handler = uprobe_dispatcher;
273 if (is_ret)
274 tu->consumer.ret_handler = uretprobe_dispatcher;
275 init_trace_uprobe_filter(&tu->filter);
276 return tu;
277
278 error:
279 kfree(tu->tp.call.name);
280 kfree(tu);
281
282 return ERR_PTR(-ENOMEM);
283 }
284
285 static void free_trace_uprobe(struct trace_uprobe *tu)
286 {
287 int i;
288
289 for (i = 0; i < tu->tp.nr_args; i++)
290 traceprobe_free_probe_arg(&tu->tp.args[i]);
291
292 iput(tu->inode);
293 kfree(tu->tp.call.class->system);
294 kfree(tu->tp.call.name);
295 kfree(tu->filename);
296 kfree(tu);
297 }
298
299 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
300 {
301 struct trace_uprobe *tu;
302
303 list_for_each_entry(tu, &uprobe_list, list)
304 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
305 strcmp(tu->tp.call.class->system, group) == 0)
306 return tu;
307
308 return NULL;
309 }
310
311 /* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
312 static int unregister_trace_uprobe(struct trace_uprobe *tu)
313 {
314 int ret;
315
316 ret = unregister_uprobe_event(tu);
317 if (ret)
318 return ret;
319
320 list_del(&tu->list);
321 free_trace_uprobe(tu);
322 return 0;
323 }
324
325 /* Register a trace_uprobe and probe_event */
326 static int register_trace_uprobe(struct trace_uprobe *tu)
327 {
328 struct trace_uprobe *old_tu;
329 int ret;
330
331 mutex_lock(&uprobe_lock);
332
333 /* register as an event */
334 old_tu = find_probe_event(trace_event_name(&tu->tp.call),
335 tu->tp.call.class->system);
336 if (old_tu) {
337 /* delete old event */
338 ret = unregister_trace_uprobe(old_tu);
339 if (ret)
340 goto end;
341 }
342
343 ret = register_uprobe_event(tu);
344 if (ret) {
345 pr_warn("Failed to register probe event(%d)\n", ret);
346 goto end;
347 }
348
349 list_add_tail(&tu->list, &uprobe_list);
350
351 end:
352 mutex_unlock(&uprobe_lock);
353
354 return ret;
355 }
356
357 /*
358 * Argument syntax:
359 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
360 *
361 * - Remove uprobe: -:[GRP/]EVENT
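 *
 * e.g. (illustrative only; the binary path and the 0x4710 offset below are
 *      made-up values, not taken from a real binary):
 *   p:mygrp/my_entry /bin/bash:0x4710
 *   r:mygrp/my_return /bin/bash:0x4710
 *   -:mygrp/my_entry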
362 */
363 static int create_trace_uprobe(int argc, char **argv)
364 {
365 struct trace_uprobe *tu;
366 struct inode *inode;
367 char *arg, *event, *group, *filename;
368 char buf[MAX_EVENT_NAME_LEN];
369 struct path path;
370 unsigned long offset;
371 bool is_delete, is_return;
372 int i, ret;
373
374 inode = NULL;
375 ret = 0;
376 is_delete = false;
377 is_return = false;
378 event = NULL;
379 group = NULL;
380
381 /* argc must be >= 1 */
382 if (argv[0][0] == '-')
383 is_delete = true;
384 else if (argv[0][0] == 'r')
385 is_return = true;
386 else if (argv[0][0] != 'p') {
387 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
388 return -EINVAL;
389 }
390
391 if (argv[0][1] == ':') {
392 event = &argv[0][2];
393 arg = strchr(event, '/');
394
395 if (arg) {
396 group = event;
397 event = arg + 1;
398 event[-1] = '\0';
399
400 if (strlen(group) == 0) {
401 pr_info("Group name is not specified\n");
402 return -EINVAL;
403 }
404 }
405 if (strlen(event) == 0) {
406 pr_info("Event name is not specified\n");
407 return -EINVAL;
408 }
409 }
410 if (!group)
411 group = UPROBE_EVENT_SYSTEM;
412
413 if (is_delete) {
414 int ret;
415
416 if (!event) {
417 pr_info("Delete command needs an event name.\n");
418 return -EINVAL;
419 }
420 mutex_lock(&uprobe_lock);
421 tu = find_probe_event(event, group);
422
423 if (!tu) {
424 mutex_unlock(&uprobe_lock);
425 pr_info("Event %s/%s doesn't exist.\n", group, event);
426 return -ENOENT;
427 }
428 /* delete an event */
429 ret = unregister_trace_uprobe(tu);
430 mutex_unlock(&uprobe_lock);
431 return ret;
432 }
433
434 if (argc < 2) {
435 pr_info("Probe point is not specified.\n");
436 return -EINVAL;
437 }
438 /* Find the last occurrence, in case the path contains ':' too. */
439 arg = strrchr(argv[1], ':');
440 if (!arg) {
441 ret = -EINVAL;
442 goto fail_address_parse;
443 }
444
445 *arg++ = '\0';
446 filename = argv[1];
447 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
448 if (ret)
449 goto fail_address_parse;
450
451 inode = igrab(d_real_inode(path.dentry));
452 path_put(&path);
453
454 if (!inode || !S_ISREG(inode->i_mode)) {
455 ret = -EINVAL;
456 goto fail_address_parse;
457 }
458
459 ret = kstrtoul(arg, 0, &offset);
460 if (ret)
461 goto fail_address_parse;
462
463 argc -= 2;
464 argv += 2;
465
466 /* setup a probe */
467 if (!event) {
468 char *tail;
469 char *ptr;
470
471 tail = kstrdup(kbasename(filename), GFP_KERNEL);
472 if (!tail) {
473 ret = -ENOMEM;
474 goto fail_address_parse;
475 }
476
477 ptr = strpbrk(tail, ".-_");
478 if (ptr)
479 *ptr = '\0';
480
481 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
482 event = buf;
483 kfree(tail);
484 }
485
486 tu = alloc_trace_uprobe(group, event, argc, is_return);
487 if (IS_ERR(tu)) {
488 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
489 ret = PTR_ERR(tu);
490 goto fail_address_parse;
491 }
492 tu->offset = offset;
493 tu->inode = inode;
494 tu->filename = kstrdup(filename, GFP_KERNEL);
495
496 if (!tu->filename) {
497 pr_info("Failed to allocate filename.\n");
498 ret = -ENOMEM;
499 goto error;
500 }
501
502 /* parse arguments */
503 ret = 0;
504 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
505 struct probe_arg *parg = &tu->tp.args[i];
506
507 /* Increment count for freeing args in error case */
508 tu->tp.nr_args++;
509
510 /* Parse argument name */
511 arg = strchr(argv[i], '=');
512 if (arg) {
513 *arg++ = '\0';
514 parg->name = kstrdup(argv[i], GFP_KERNEL);
515 } else {
516 arg = argv[i];
517 /* If argument name is omitted, set "argN" */
518 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
519 parg->name = kstrdup(buf, GFP_KERNEL);
520 }
521
522 if (!parg->name) {
523 pr_info("Failed to allocate argument[%d] name.\n", i);
524 ret = -ENOMEM;
525 goto error;
526 }
527
528 if (!is_good_name(parg->name)) {
529 pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
530 ret = -EINVAL;
531 goto error;
532 }
533
534 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
535 pr_info("Argument[%d] name '%s' conflicts with "
536 "another field.\n", i, argv[i]);
537 ret = -EINVAL;
538 goto error;
539 }
540
541 /* Parse fetch argument */
542 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
543 is_return, false,
544 uprobes_fetch_type_table);
545 if (ret) {
546 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
547 goto error;
548 }
549 }
550
551 ret = register_trace_uprobe(tu);
552 if (ret)
553 goto error;
554 return 0;
555
556 error:
557 free_trace_uprobe(tu);
558 return ret;
559
560 fail_address_parse:
561 iput(inode);
562
563 pr_info("Failed to parse address or file.\n");
564
565 return ret;
566 }
567
568 static int cleanup_all_probes(void)
569 {
570 struct trace_uprobe *tu;
571 int ret = 0;
572
573 mutex_lock(&uprobe_lock);
574 while (!list_empty(&uprobe_list)) {
575 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
576 ret = unregister_trace_uprobe(tu);
577 if (ret)
578 break;
579 }
580 mutex_unlock(&uprobe_lock);
581 return ret;
582 }
583
584 /* Probes listing interfaces */
585 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
586 {
587 mutex_lock(&uprobe_lock);
588 return seq_list_start(&uprobe_list, *pos);
589 }
590
591 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
592 {
593 return seq_list_next(v, &uprobe_list, pos);
594 }
595
596 static void probes_seq_stop(struct seq_file *m, void *v)
597 {
598 mutex_unlock(&uprobe_lock);
599 }
600
601 static int probes_seq_show(struct seq_file *m, void *v)
602 {
603 struct trace_uprobe *tu = v;
604 char c = is_ret_probe(tu) ? 'r' : 'p';
605 int i;
606
607 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
608 trace_event_name(&tu->tp.call), tu->filename,
609 (int)(sizeof(void *) * 2), tu->offset);
610
611 for (i = 0; i < tu->tp.nr_args; i++)
612 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
613
614 seq_putc(m, '\n');
615 return 0;
616 }
617
618 static const struct seq_operations probes_seq_op = {
619 .start = probes_seq_start,
620 .next = probes_seq_next,
621 .stop = probes_seq_stop,
622 .show = probes_seq_show
623 };
624
625 static int probes_open(struct inode *inode, struct file *file)
626 {
627 int ret;
628
629 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
630 ret = cleanup_all_probes();
631 if (ret)
632 return ret;
633 }
634
635 return seq_open(file, &probes_seq_op);
636 }
637
638 static ssize_t probes_write(struct file *file, const char __user *buffer,
639 size_t count, loff_t *ppos)
640 {
641 return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
642 }
643
644 static const struct file_operations uprobe_events_ops = {
645 .owner = THIS_MODULE,
646 .open = probes_open,
647 .read = seq_read,
648 .llseek = seq_lseek,
649 .release = seq_release,
650 .write = probes_write,
651 };
652
653 /* Probes profiling interfaces */
654 static int probes_profile_seq_show(struct seq_file *m, void *v)
655 {
656 struct trace_uprobe *tu = v;
657
658 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
659 trace_event_name(&tu->tp.call), tu->nhit);
660 return 0;
661 }
662
663 static const struct seq_operations profile_seq_op = {
664 .start = probes_seq_start,
665 .next = probes_seq_next,
666 .stop = probes_seq_stop,
667 .show = probes_profile_seq_show
668 };
669
670 static int profile_open(struct inode *inode, struct file *file)
671 {
672 return seq_open(file, &profile_seq_op);
673 }
674
675 static const struct file_operations uprobe_profile_ops = {
676 .owner = THIS_MODULE,
677 .open = profile_open,
678 .read = seq_read,
679 .llseek = seq_lseek,
680 .release = seq_release,
681 };
682
683 struct uprobe_cpu_buffer {
684 struct mutex mutex;
685 void *buf;
686 };
687 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
688 static int uprobe_buffer_refcnt;
689
690 static int uprobe_buffer_init(void)
691 {
692 int cpu, err_cpu;
693
694 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
695 if (uprobe_cpu_buffer == NULL)
696 return -ENOMEM;
697
698 for_each_possible_cpu(cpu) {
699 struct page *p = alloc_pages_node(cpu_to_node(cpu),
700 GFP_KERNEL, 0);
701 if (p == NULL) {
702 err_cpu = cpu;
703 goto err;
704 }
705 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
706 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
707 }
708
709 return 0;
710
711 err:
712 for_each_possible_cpu(cpu) {
713 if (cpu == err_cpu)
714 break;
715 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
716 }
717
718 free_percpu(uprobe_cpu_buffer);
719 return -ENOMEM;
720 }
721
722 static int uprobe_buffer_enable(void)
723 {
724 int ret = 0;
725
726 BUG_ON(!mutex_is_locked(&event_mutex));
727
728 if (uprobe_buffer_refcnt++ == 0) {
729 ret = uprobe_buffer_init();
730 if (ret < 0)
731 uprobe_buffer_refcnt--;
732 }
733
734 return ret;
735 }
736
737 static void uprobe_buffer_disable(void)
738 {
739 int cpu;
740
741 BUG_ON(!mutex_is_locked(&event_mutex));
742
743 if (--uprobe_buffer_refcnt == 0) {
744 for_each_possible_cpu(cpu)
745 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
746 cpu)->buf);
747
748 free_percpu(uprobe_cpu_buffer);
749 uprobe_cpu_buffer = NULL;
750 }
751 }
752
753 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
754 {
755 struct uprobe_cpu_buffer *ucb;
756 int cpu;
757
758 cpu = raw_smp_processor_id();
759 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
760
761 /*
762 * Use per-cpu buffers for fastest access, but we might migrate
763 * so the mutex makes sure we have sole access to it.
764 */
765 mutex_lock(&ucb->mutex);
766
767 return ucb;
768 }
769
770 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
771 {
772 mutex_unlock(&ucb->mutex);
773 }
774
775 static void __uprobe_trace_func(struct trace_uprobe *tu,
776 unsigned long func, struct pt_regs *regs,
777 struct uprobe_cpu_buffer *ucb, int dsize,
778 struct trace_event_file *trace_file)
779 {
780 struct uprobe_trace_entry_head *entry;
781 struct ring_buffer_event *event;
782 struct ring_buffer *buffer;
783 void *data;
784 int size, esize;
785 struct trace_event_call *call = &tu->tp.call;
786
787 WARN_ON(call != trace_file->event_call);
788
789 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
790 return;
791
792 if (trace_trigger_soft_disabled(trace_file))
793 return;
794
795 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
796 size = esize + tu->tp.size + dsize;
797 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
798 call->event.type, size, 0, 0);
799 if (!event)
800 return;
801
802 entry = ring_buffer_event_data(event);
803 if (is_ret_probe(tu)) {
804 entry->vaddr[0] = func;
805 entry->vaddr[1] = instruction_pointer(regs);
806 data = DATAOF_TRACE_ENTRY(entry, true);
807 } else {
808 entry->vaddr[0] = instruction_pointer(regs);
809 data = DATAOF_TRACE_ENTRY(entry, false);
810 }
811
812 memcpy(data, ucb->buf, tu->tp.size + dsize);
813
814 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
815 }
816
817 /* uprobe handler */
818 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
819 struct uprobe_cpu_buffer *ucb, int dsize)
820 {
821 struct event_file_link *link;
822
823 if (is_ret_probe(tu))
824 return 0;
825
826 rcu_read_lock();
827 list_for_each_entry_rcu(link, &tu->tp.files, list)
828 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
829 rcu_read_unlock();
830
831 return 0;
832 }
833
834 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
835 struct pt_regs *regs,
836 struct uprobe_cpu_buffer *ucb, int dsize)
837 {
838 struct event_file_link *link;
839
840 rcu_read_lock();
841 list_for_each_entry_rcu(link, &tu->tp.files, list)
842 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
843 rcu_read_unlock();
844 }
845
846 /* Event entry printers */
847 static enum print_line_t
848 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
849 {
850 struct uprobe_trace_entry_head *entry;
851 struct trace_seq *s = &iter->seq;
852 struct trace_uprobe *tu;
853 u8 *data;
854 int i;
855
856 entry = (struct uprobe_trace_entry_head *)iter->ent;
857 tu = container_of(event, struct trace_uprobe, tp.call.event);
858
859 if (is_ret_probe(tu)) {
860 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
861 trace_event_name(&tu->tp.call),
862 entry->vaddr[1], entry->vaddr[0]);
863 data = DATAOF_TRACE_ENTRY(entry, true);
864 } else {
865 trace_seq_printf(s, "%s: (0x%lx)",
866 trace_event_name(&tu->tp.call),
867 entry->vaddr[0]);
868 data = DATAOF_TRACE_ENTRY(entry, false);
869 }
870
871 for (i = 0; i < tu->tp.nr_args; i++) {
872 struct probe_arg *parg = &tu->tp.args[i];
873
874 if (!parg->type->print(s, parg->name, data + parg->offset, entry))
875 goto out;
876 }
877
878 trace_seq_putc(s, '\n');
879
880 out:
881 return trace_handle_return(s);
882 }
883
884 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
885 enum uprobe_filter_ctx ctx,
886 struct mm_struct *mm);
887
888 static int
889 probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
890 filter_func_t filter)
891 {
892 bool enabled = trace_probe_is_enabled(&tu->tp);
893 struct event_file_link *link = NULL;
894 int ret;
895
896 if (file) {
897 if (tu->tp.flags & TP_FLAG_PROFILE)
898 return -EINTR;
899
900 link = kmalloc(sizeof(*link), GFP_KERNEL);
901 if (!link)
902 return -ENOMEM;
903
904 link->file = file;
905 list_add_tail_rcu(&link->list, &tu->tp.files);
906
907 tu->tp.flags |= TP_FLAG_TRACE;
908 } else {
909 if (tu->tp.flags & TP_FLAG_TRACE)
910 return -EINTR;
911
912 tu->tp.flags |= TP_FLAG_PROFILE;
913 }
914
915 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
916
917 if (enabled)
918 return 0;
919
920 ret = uprobe_buffer_enable();
921 if (ret)
922 goto err_flags;
923
924 tu->consumer.filter = filter;
925 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
926 if (ret)
927 goto err_buffer;
928
929 return 0;
930
931 err_buffer:
932 uprobe_buffer_disable();
933
934 err_flags:
935 if (file) {
936 list_del(&link->list);
937 kfree(link);
938 tu->tp.flags &= ~TP_FLAG_TRACE;
939 } else {
940 tu->tp.flags &= ~TP_FLAG_PROFILE;
941 }
942 return ret;
943 }
944
945 static void
946 probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
947 {
948 if (!trace_probe_is_enabled(&tu->tp))
949 return;
950
951 if (file) {
952 struct event_file_link *link;
953
954 link = find_event_file_link(&tu->tp, file);
955 if (!link)
956 return;
957
958 list_del_rcu(&link->list);
959 /* synchronize with u{,ret}probe_trace_func */
960 synchronize_sched();
961 kfree(link);
962
963 if (!list_empty(&tu->tp.files))
964 return;
965 }
966
967 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
968
969 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
970 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
971
972 uprobe_buffer_disable();
973 }
974
975 static int uprobe_event_define_fields(struct trace_event_call *event_call)
976 {
977 int ret, i, size;
978 struct uprobe_trace_entry_head field;
979 struct trace_uprobe *tu = event_call->data;
980
981 if (is_ret_probe(tu)) {
982 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
983 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
984 size = SIZEOF_TRACE_ENTRY(true);
985 } else {
986 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
987 size = SIZEOF_TRACE_ENTRY(false);
988 }
989 /* Set argument names as fields */
990 for (i = 0; i < tu->tp.nr_args; i++) {
991 struct probe_arg *parg = &tu->tp.args[i];
992
993 ret = trace_define_field(event_call, parg->type->fmttype,
994 parg->name, size + parg->offset,
995 parg->type->size, parg->type->is_signed,
996 FILTER_OTHER);
997
998 if (ret)
999 return ret;
1000 }
1001 return 0;
1002 }
1003
1004 #ifdef CONFIG_PERF_EVENTS
1005 static bool
1006 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1007 {
1008 struct perf_event *event;
1009
1010 if (filter->nr_systemwide)
1011 return true;
1012
1013 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1014 if (event->hw.target->mm == mm)
1015 return true;
1016 }
1017
1018 return false;
1019 }
1020
1021 static inline bool
1022 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1023 {
1024 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1025 }
1026
1027 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1028 {
1029 bool done;
1030
1031 write_lock(&tu->filter.rwlock);
1032 if (event->hw.target) {
1033 list_del(&event->hw.tp_list);
1034 done = tu->filter.nr_systemwide ||
1035 (event->hw.target->flags & PF_EXITING) ||
1036 uprobe_filter_event(tu, event);
1037 } else {
1038 tu->filter.nr_systemwide--;
1039 done = tu->filter.nr_systemwide;
1040 }
1041 write_unlock(&tu->filter.rwlock);
1042
1043 if (!done)
1044 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1045
1046 return 0;
1047 }
1048
1049 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1050 {
1051 bool done;
1052 int err;
1053
1054 write_lock(&tu->filter.rwlock);
1055 if (event->hw.target) {
1056 /*
1057 * event->parent != NULL means copy_process(), we can avoid
1058 * uprobe_apply(). current->mm must be probed and we can rely
1059 * on dup_mmap() which preserves the already installed bp's.
1060 *
1061 * attr.enable_on_exec means that exec/mmap will install the
1062 * breakpoints we need.
1063 */
1064 done = tu->filter.nr_systemwide ||
1065 event->parent || event->attr.enable_on_exec ||
1066 uprobe_filter_event(tu, event);
1067 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1068 } else {
1069 done = tu->filter.nr_systemwide;
1070 tu->filter.nr_systemwide++;
1071 }
1072 write_unlock(&tu->filter.rwlock);
1073
1074 err = 0;
1075 if (!done) {
1076 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1077 if (err)
1078 uprobe_perf_close(tu, event);
1079 }
1080 return err;
1081 }
1082
1083 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1084 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1085 {
1086 struct trace_uprobe *tu;
1087 int ret;
1088
1089 tu = container_of(uc, struct trace_uprobe, consumer);
1090 read_lock(&tu->filter.rwlock);
1091 ret = __uprobe_perf_filter(&tu->filter, mm);
1092 read_unlock(&tu->filter.rwlock);
1093
1094 return ret;
1095 }
1096
1097 static void __uprobe_perf_func(struct trace_uprobe *tu,
1098 unsigned long func, struct pt_regs *regs,
1099 struct uprobe_cpu_buffer *ucb, int dsize)
1100 {
1101 struct trace_event_call *call = &tu->tp.call;
1102 struct uprobe_trace_entry_head *entry;
1103 struct hlist_head *head;
1104 void *data;
1105 int size, esize;
1106 int rctx;
1107
1108 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1109 return;
1110
1111 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1112
1113 size = esize + tu->tp.size + dsize;
1114 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1115 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1116 return;
1117
1118 preempt_disable();
1119 head = this_cpu_ptr(call->perf_events);
1120 if (hlist_empty(head))
1121 goto out;
1122
1123 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1124 if (!entry)
1125 goto out;
1126
1127 if (is_ret_probe(tu)) {
1128 entry->vaddr[0] = func;
1129 entry->vaddr[1] = instruction_pointer(regs);
1130 data = DATAOF_TRACE_ENTRY(entry, true);
1131 } else {
1132 entry->vaddr[0] = instruction_pointer(regs);
1133 data = DATAOF_TRACE_ENTRY(entry, false);
1134 }
1135
1136 memcpy(data, ucb->buf, tu->tp.size + dsize);
1137
1138 if (size - esize > tu->tp.size + dsize) {
1139 int len = tu->tp.size + dsize;
1140
1141 memset(data + len, 0, size - esize - len);
1142 }
1143
1144 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1145 head, NULL);
1146 out:
1147 preempt_enable();
1148 }
1149
1150 /* uprobe profile handler */
1151 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1152 struct uprobe_cpu_buffer *ucb, int dsize)
1153 {
1154 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1155 return UPROBE_HANDLER_REMOVE;
1156
1157 if (!is_ret_probe(tu))
1158 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1159 return 0;
1160 }
1161
1162 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1163 struct pt_regs *regs,
1164 struct uprobe_cpu_buffer *ucb, int dsize)
1165 {
1166 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1167 }
1168 #endif /* CONFIG_PERF_EVENTS */
1169
1170 static int
1171 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1172 void *data)
1173 {
1174 struct trace_uprobe *tu = event->data;
1175 struct trace_event_file *file = data;
1176
1177 switch (type) {
1178 case TRACE_REG_REGISTER:
1179 return probe_event_enable(tu, file, NULL);
1180
1181 case TRACE_REG_UNREGISTER:
1182 probe_event_disable(tu, file);
1183 return 0;
1184
1185 #ifdef CONFIG_PERF_EVENTS
1186 case TRACE_REG_PERF_REGISTER:
1187 return probe_event_enable(tu, NULL, uprobe_perf_filter);
1188
1189 case TRACE_REG_PERF_UNREGISTER:
1190 probe_event_disable(tu, NULL);
1191 return 0;
1192
1193 case TRACE_REG_PERF_OPEN:
1194 return uprobe_perf_open(tu, data);
1195
1196 case TRACE_REG_PERF_CLOSE:
1197 return uprobe_perf_close(tu, data);
1198
1199 #endif
1200 default:
1201 return 0;
1202 }
1203 return 0;
1204 }
1205
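/*
 * Probe-hit handler installed as tu->consumer.handler.  It stashes a
 * uprobe_dispatch_data pointer in current->utask->vaddr so that
 * translate_user_vaddr() can turn a file_offset fetch argument into a
 * virtual address within the current mapping of the probed binary, then
 * dispatches to the ftrace and/or perf handlers depending on tp.flags.
 */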
1206 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1207 {
1208 struct trace_uprobe *tu;
1209 struct uprobe_dispatch_data udd;
1210 struct uprobe_cpu_buffer *ucb;
1211 int dsize, esize;
1212 int ret = 0;
1213
1214
1215 tu = container_of(con, struct trace_uprobe, consumer);
1216 tu->nhit++;
1217
1218 udd.tu = tu;
1219 udd.bp_addr = instruction_pointer(regs);
1220
1221 current->utask->vaddr = (unsigned long) &udd;
1222
1223 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1224 return 0;
1225
1226 dsize = __get_data_size(&tu->tp, regs);
1227 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1228
1229 ucb = uprobe_buffer_get();
1230 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1231
1232 if (tu->tp.flags & TP_FLAG_TRACE)
1233 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1234
1235 #ifdef CONFIG_PERF_EVENTS
1236 if (tu->tp.flags & TP_FLAG_PROFILE)
1237 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1238 #endif
1239 uprobe_buffer_put(ucb);
1240 return ret;
1241 }
1242
1243 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1244 unsigned long func, struct pt_regs *regs)
1245 {
1246 struct trace_uprobe *tu;
1247 struct uprobe_dispatch_data udd;
1248 struct uprobe_cpu_buffer *ucb;
1249 int dsize, esize;
1250
1251 tu = container_of(con, struct trace_uprobe, consumer);
1252
1253 udd.tu = tu;
1254 udd.bp_addr = func;
1255
1256 current->utask->vaddr = (unsigned long) &udd;
1257
1258 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1259 return 0;
1260
1261 dsize = __get_data_size(&tu->tp, regs);
1262 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1263
1264 ucb = uprobe_buffer_get();
1265 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1266
1267 if (tu->tp.flags & TP_FLAG_TRACE)
1268 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1269
1270 #ifdef CONFIG_PERF_EVENTS
1271 if (tu->tp.flags & TP_FLAG_PROFILE)
1272 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1273 #endif
1274 uprobe_buffer_put(ucb);
1275 return 0;
1276 }
1277
1278 static struct trace_event_functions uprobe_funcs = {
1279 .trace = print_uprobe_event
1280 };
1281
1282 static inline void init_trace_event_call(struct trace_uprobe *tu,
1283 struct trace_event_call *call)
1284 {
1285 INIT_LIST_HEAD(&call->class->fields);
1286 call->event.funcs = &uprobe_funcs;
1287 call->class->define_fields = uprobe_event_define_fields;
1288
1289 call->flags = TRACE_EVENT_FL_UPROBE;
1290 call->class->reg = trace_uprobe_register;
1291 call->data = tu;
1292 }
1293
1294 static int register_uprobe_event(struct trace_uprobe *tu)
1295 {
1296 struct trace_event_call *call = &tu->tp.call;
1297 int ret = 0;
1298
1299 init_trace_event_call(tu, call);
1300
1301 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1302 return -ENOMEM;
1303
1304 ret = register_trace_event(&call->event);
1305 if (!ret) {
1306 kfree(call->print_fmt);
1307 return -ENODEV;
1308 }
1309
1310 ret = trace_add_event_call(call);
1311
1312 if (ret) {
1313 pr_info("Failed to register uprobe event: %s\n",
1314 trace_event_name(call));
1315 kfree(call->print_fmt);
1316 unregister_trace_event(&call->event);
1317 }
1318
1319 return ret;
1320 }
1321
1322 static int unregister_uprobe_event(struct trace_uprobe *tu)
1323 {
1324 int ret;
1325
1326 /* tu->event is unregistered in trace_remove_event_call() */
1327 ret = trace_remove_event_call(&tu->tp.call);
1328 if (ret)
1329 return ret;
1330 kfree(tu->tp.call.print_fmt);
1331 tu->tp.call.print_fmt = NULL;
1332 return 0;
1333 }
1334
1335 #ifdef CONFIG_PERF_EVENTS
1336 struct trace_event_call *
1337 create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1338 {
1339 struct trace_uprobe *tu;
1340 struct inode *inode;
1341 struct path path;
1342 int ret;
1343
1344 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1345 if (ret)
1346 return ERR_PTR(ret);
1347
1348 inode = igrab(d_inode(path.dentry));
1349 path_put(&path);
1350
1351 if (!inode || !S_ISREG(inode->i_mode)) {
1352 iput(inode);
1353 return ERR_PTR(-EINVAL);
1354 }
1355
1356 /*
1357  * local trace_uprobes are not added to uprobe_list, so they are never
1358  * searched in find_probe_event(). Therefore, there is no concern of
1359 * duplicated name "DUMMY_EVENT" here.
1360 */
1361 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1362 is_return);
1363
1364 if (IS_ERR(tu)) {
1365 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1366 (int)PTR_ERR(tu));
1367 return ERR_CAST(tu);
1368 }
1369
1370 tu->offset = offs;
1371 tu->inode = inode;
1372 tu->filename = kstrdup(name, GFP_KERNEL);
1373 init_trace_event_call(tu, &tu->tp.call);
1374
1375 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1376 ret = -ENOMEM;
1377 goto error;
1378 }
1379
1380 return &tu->tp.call;
1381 error:
1382 free_trace_uprobe(tu);
1383 return ERR_PTR(ret);
1384 }
1385
1386 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1387 {
1388 struct trace_uprobe *tu;
1389
1390 tu = container_of(event_call, struct trace_uprobe, tp.call);
1391
1392 kfree(tu->tp.call.print_fmt);
1393 tu->tp.call.print_fmt = NULL;
1394
1395 free_trace_uprobe(tu);
1396 }
1397 #endif /* CONFIG_PERF_EVENTS */
1398
1399 /* Make a trace interface for controlling probe points */
1400 static __init int init_uprobe_trace(void)
1401 {
1402 struct dentry *d_tracer;
1403
1404 d_tracer = tracing_init_dentry();
1405 if (IS_ERR(d_tracer))
1406 return 0;
1407
1408 trace_create_file("uprobe_events", 0644, d_tracer,
1409 NULL, &uprobe_events_ops);
1410 /* Profile interface */
1411 trace_create_file("uprobe_profile", 0444, d_tracer,
1412 NULL, &uprobe_profile_ops);
1413 return 0;
1414 }
1415
1416 fs_initcall(init_uprobe_trace);