]> git.ipfire.org Git - people/arne_f/kernel.git/blobdiff - kernel/trace/ftrace.c
ftrace: Setup correct FTRACE_FL_REGS flags for module
[people/arne_f/kernel.git] / kernel / trace / ftrace.c
index 8319e09e15b945f14f9046edeb885e173ef26652..9e831623d2deac7a9fe4f02d1ecc5c2ae49dc35f 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -621,8 +622,7 @@ static int function_stat_show(struct seq_file *m, void *v)
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       avg = rec->time;
-       do_div(avg, rec->counter);
+       avg = div64_ul(rec->time, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
 #endif
@@ -648,7 +648,8 @@ static int function_stat_show(struct seq_file *m, void *v)
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
-               do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+               stddev = div64_ul(stddev,
+                                 rec->counter * (rec->counter - 1) * 1000);
        }
 
        trace_seq_init(&s);
@@ -1711,6 +1712,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
        return  keep_regs;
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
@@ -1839,15 +1845,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        }
 
                        /*
-                        * If the rec had TRAMP enabled, then it needs to
-                        * be cleared. As TRAMP can only be enabled iff
-                        * there is only a single ops attached to it.
-                        * In otherwords, always disable it on decrementing.
-                        * In the future, we may set it if rec count is
-                        * decremented to one, and the ops that is left
-                        * has a trampoline.
+                        * The TRAMP needs to be set only if rec count
+                        * is decremented to one, and the ops that is
+                        * left has a trampoline. As TRAMP can only be
+                        * enabled if there is only a single ops attached
+                        * to it.
                         */
-                       rec->flags &= ~FTRACE_FL_TRAMP;
+                       if (ftrace_rec_count(rec) == 1 &&
+                           ftrace_find_tramp_ops_any(rec))
+                               rec->flags |= FTRACE_FL_TRAMP;
+                       else
+                               rec->flags &= ~FTRACE_FL_TRAMP;
 
                        /*
                         * flags will be cleared in ftrace_check_record()
@@ -2040,11 +2048,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
 enum ftrace_bug_type ftrace_bug_type;
 const void *ftrace_expected;
 
@@ -3181,6 +3184,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
                hnd = &iter->probe_entry->hlist;
 
        hash = iter->probe->ops.func_hash->filter_hash;
+
+       /*
+        * A probe being registered may temporarily have an empty hash
+        * and it's at the end of the func_probes list.
+        */
+       if (!hash || hash == EMPTY_HASH)
+               return NULL;
+
        size = 1 << hash->size_bits;
 
  retry:
@@ -3618,21 +3629,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        struct list_head *mod_head;
        struct trace_array *tr = ops->private;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        ftrace_ops_init(ops);
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       if (tr && trace_array_get(tr) < 0)
+               return -ENODEV;
+
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return -ENOMEM;
+               goto out;
 
-       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-               kfree(iter);
-               return -ENOMEM;
-       }
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+               goto out;
 
        iter->ops = ops;
        iter->flags = flag;
@@ -3662,13 +3674,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
-                       ret = -ENOMEM;
                        goto out_unlock;
                }
        } else
                iter->hash = hash;
 
+       ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -3680,7 +3692,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                        /* Failed */
                        free_ftrace_hash(iter->hash);
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
                }
        } else
                file->private_data = iter;
@@ -3688,6 +3699,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
        mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+       if (ret) {
+               kfree(iter);
+               if (tr)
+                       trace_array_put(tr);
+       }
+
        return ret;
 }
 
@@ -4279,10 +4297,13 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
        struct ftrace_func_entry *entry;
        struct ftrace_func_map *map;
        struct hlist_head *hhd;
-       int size = 1 << mapper->hash.size_bits;
-       int i;
+       int size, i;
+
+       if (!mapper)
+               return;
 
        if (free_func && mapper->hash.count) {
+               size = 1 << mapper->hash.size_bits;
                for (i = 0; i < size; i++) {
                        hhd = &mapper->hash.buckets[i];
                        hlist_for_each_entry(entry, hhd, hlist) {
@@ -4374,12 +4395,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
        mutex_unlock(&ftrace_lock);
 
+       /*
+        * Note, there's a small window here that the func_hash->filter_hash
+        * may be NULL or empty. Need to be careful when reading the loop.
+        */
        mutex_lock(&probe->ops.func_hash->regex_lock);
 
        orig_hash = &probe->ops.func_hash->filter_hash;
        old_hash = *orig_hash;
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        ret = ftrace_match_records(hash, glob, strlen(glob));
 
        /* Nothing found? */
@@ -4488,7 +4518,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
                func_g.type = filter_parse_regex(glob, strlen(glob),
                                                 &func_g.search, &not);
                func_g.len = strlen(func_g.search);
-               func_g.search = glob;
 
                /* we do not support '!' for function probes */
                if (WARN_ON(not))
@@ -5076,6 +5105,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
+       if (iter->tr)
+               trace_array_put(iter->tr);
        kfree(iter);
 
        return 0;
@@ -5115,8 +5146,8 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
 
 enum graph_filter_type {
        GRAPH_FILTER_NOTRACE    = 0,
@@ -5388,8 +5419,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_sched();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               schedule_on_each_cpu(ftrace_sync);
 
                free_ftrace_hash(old_hash);
        }
@@ -5535,6 +5573,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
        if (ops->flags & FTRACE_OPS_FL_ENABLED)
                ftrace_shutdown(ops, 0);
        ops->flags |= FTRACE_OPS_FL_DELETED;
+       ftrace_free_filter(ops);
        mutex_unlock(&ftrace_lock);
 }
 
@@ -5682,8 +5721,11 @@ static int referenced_filters(struct dyn_ftrace *rec)
        int cnt = 0;
 
        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
-               if (ops_references_rec(ops, rec))
-                   cnt++;
+               if (ops_references_rec(ops, rec)) {
+                       cnt++;
+                       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+                               rec->flags |= FTRACE_FL_REGS;
+               }
        }
 
        return cnt;
@@ -5832,8 +5874,8 @@ void ftrace_module_enable(struct module *mod)
                if (ftrace_start_up)
                        cnt += referenced_filters(rec);
 
-               /* This clears FTRACE_FL_DISABLED */
-               rec->flags = cnt;
+               rec->flags &= ~FTRACE_FL_DISABLED;
+               rec->flags += cnt;
 
                if (ftrace_start_up && cnt) {
                        int failed = __ftrace_replace_code(rec, 1);
@@ -6035,7 +6077,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
        tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6098,11 +6140,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6132,6 +6176,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
@@ -6275,9 +6320,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
 
-       if (v == FTRACE_NO_PIDS)
+       if (v == FTRACE_NO_PIDS) {
+               (*pos)++;
                return NULL;
-
+       }
        return trace_pid_next(pid_list, v, pos);
 }