/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
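
/*
 * Illustrative layout note (not from the original source): assuming the
 * usual 8-byte struct trace_entry on a 64-bit kernel, a regular probe
 * record header is 8 + 8 = 16 bytes (one vaddr slot holding the probed
 * IP) and a return-probe header is 8 + 16 = 24 bytes (called function
 * address plus return address).  DATAOF_TRACE_ENTRY() then points at the
 * fetched argument data that follows this fixed header.
 */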
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};
#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
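
/*
 * Illustrative note (not from the original source): tp.args[] is a
 * flexible array at the tail of the structure, so the allocation size
 * scales with the argument count, e.g. SIZEOF_TRACE_UPROBE(2) is the
 * offset of tp.args plus room for two struct probe_arg entries.
 */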
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
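/*
 * Illustrative note on the encoding (assumed from the common trace_probe
 * helpers, not spelled out in this file): the u32 at *dest is a "data
 * location" word packing the string length in the upper 16 bits and the
 * offset of the string data relative to the entry in the lower 16 bits,
 * i.e. roughly make_data_rloc(len, offs) == (len << 16) | (offs & 0xffff).
 */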
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}
#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
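
/*
 * Illustrative note (not part of the original source): these type names
 * are what may follow a ':' in a FETCHARG of the uprobe_events syntax,
 * e.g. "arg1=%ax:u32" or "name=+0(%di):string" on x86_64.
 */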
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}
static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
				  tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
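/*
 * Illustrative usage via tracefs (paths, offsets and argument specs are
 * examples only, not taken from this file):
 *
 *   echo 'p:mybash /bin/bash:0x4245c0 arg1=%ax' > uprobe_events
 *   echo 'r:mybash_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:mybash_ret' >> uprobe_events
 */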
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);
		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_real_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);
	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
			      enum uprobe_filter_ctx ctx,
			      struct mm_struct *mm);
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}

	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
1151 static int uprobe_perf_func(struct trace_uprobe
*tu
, struct pt_regs
*regs
,
1152 struct uprobe_cpu_buffer
*ucb
, int dsize
)
1154 if (!uprobe_perf_filter(&tu
->consumer
, 0, current
->mm
))
1155 return UPROBE_HANDLER_REMOVE
;
1157 if (!is_ret_probe(tu
))
1158 __uprobe_perf_func(tu
, 0, regs
, ucb
, dsize
);
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = (void *)tu;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are never
	 * found by find_probe_event(). Therefore, there is no concern of a
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->inode = inode;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);