git.ipfire.org: thirdparty/kernel/stable.git, blobdiff of kernel/kprobes.c (Linux 4.14.336)
index 5c90765d37e77d373c8a7a2da0c5e40b26884411..eeee76ba82ca42bf9f650d30bfc98f60e3c87135 100644
@@ -418,8 +418,8 @@ static inline int kprobe_optready(struct kprobe *p)
        return 0;
 }
 
-/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
-static inline int kprobe_disarmed(struct kprobe *p)
+/* Return true if the kprobe is disarmed. Note: p must be on hash list */
+bool kprobe_disarmed(struct kprobe *p)
 {
        struct optimized_kprobe *op;
 
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+       lockdep_assert_held(&text_mutex);
        /*
         * The optimization/unoptimization refers online_cpus via
         * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
            list_empty(&optimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
-       mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
+       lockdep_assert_held(&text_mutex);
        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();
 
@@ -520,10 +520,11 @@ static void do_unoptimize_kprobes(void)
        if (list_empty(&unoptimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+               /* Switching from detour code to origin */
+               op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
@@ -537,7 +538,6 @@ static void do_unoptimize_kprobes(void)
                } else
                        list_del_init(&op->list);
        }
-       mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -546,8 +546,14 @@ static void do_free_cleaned_kprobes(void)
        struct optimized_kprobe *op, *tmp;
 
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
-               BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
+               if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+                       /*
+                        * This must not happen, but if there is a kprobe
+                        * still in use, keep it on kprobes hash list.
+                        */
+                       continue;
+               }
                free_aggr_kprobe(&op->kp);
        }
 }
@@ -563,6 +569,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
+       mutex_lock(&text_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
 
@@ -590,12 +597,14 @@ static void kprobe_optimizer(struct work_struct *work)
        do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
+       mutex_unlock(&text_mutex);
        cpus_read_unlock();
-       mutex_unlock(&kprobe_mutex);
 
        /* Step 5: Kick optimizer again if needed */
        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                kick_kprobe_optimizer();
+
+       mutex_unlock(&kprobe_mutex);
 }
 
 /* Wait for completing optimization and unoptimization */
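
The hunks above pull text_mutex out of do_optimize_kprobes() and do_unoptimize_kprobes() and take it once in kprobe_optimizer(), so both patching passes run under a single text_mutex critical section while the helpers merely document the requirement via lockdep_assert_held(). A minimal sketch of the resulting lock nesting (illustration only, not part of the patch):

	mutex_lock(&kprobe_mutex);	/* serializes kprobe bookkeeping */
	cpus_read_lock();		/* keeps online_cpus stable for stop_machine() */
	mutex_lock(&text_mutex);	/* covers all kernel text patching */
	mutex_lock(&module_mutex);	/* modules cannot unload underneath us */

	/*
	 * do_unoptimize_kprobes(), do_optimize_kprobes() and
	 * do_free_cleaned_kprobes() run here; the first two now assert
	 * text_mutex with lockdep instead of taking it themselves.
	 */

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	/* the optimizer is re-kicked, if needed, before kprobe_mutex is dropped */
	mutex_unlock(&kprobe_mutex);
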
@@ -617,6 +626,18 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }
 
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+       struct optimized_kprobe *_op;
+
+       list_for_each_entry(_op, &unoptimizing_list, list) {
+               if (op == _op)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -638,17 +659,21 @@ static void optimize_kprobe(struct kprobe *p)
                return;
 
        /* Check if it is already optimized. */
-       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+               if (optprobe_queued_unopt(op)) {
+                       /* This is under unoptimizing. Just dequeue the probe */
+                       list_del_init(&op->list);
+               }
                return;
+       }
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-       if (!list_empty(&op->list))
-               /* This is under unoptimizing. Just dequeue the probe */
-               list_del_init(&op->list);
-       else {
-               list_add(&op->list, &optimizing_list);
-               kick_kprobe_optimizer();
-       }
+       /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+       if (WARN_ON_ONCE(!list_empty(&op->list)))
+               return;
+
+       list_add(&op->list, &optimizing_list);
+       kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
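
Because op->list is shared by optimizing_list, unoptimizing_list and freeing_list, list_empty() alone cannot tell which queue a probe sits on; the new optprobe_queued_unopt() helper resolves that, and the rework of optimize_kprobe() above keeps KPROBE_FLAG_OPTIMIZED consistent with whichever queue the probe is on. A summary of the states this code now expects (illustration derived from the hunks above, not text from the patch):

	/*
	 * OPTIMIZED set,   op->list empty       : fully optimized, detour code in place
	 * OPTIMIZED set,   on optimizing_list   : queued, waiting to be optimized
	 * OPTIMIZED set,   on unoptimizing_list : queued, waiting to be unoptimized;
	 *                                         optimize_kprobe() cancels this by
	 *                                         dequeueing it, nothing to re-patch
	 * OPTIMIZED clear, op->list empty       : plain (unoptimized) kprobe
	 * OPTIMIZED clear, on either work list  : invalid; caught by the WARN_ON_ONCE()
	 *                                         in optimize_kprobe()
	 */
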
@@ -656,6 +681,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
+       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (kprobe_disabled(&op->kp))
                arch_disarm_kprobe(&op->kp);
 }
@@ -669,38 +695,40 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                return; /* This is not an optprobe nor optimized */
 
        op = container_of(p, struct optimized_kprobe, kp);
-       if (!kprobe_optimized(p)) {
-               /* Unoptimized or unoptimizing case */
-               if (force && !list_empty(&op->list)) {
-                       /*
-                        * Only if this is unoptimizing kprobe and forced,
-                        * forcibly unoptimize it. (No need to unoptimize
-                        * unoptimized kprobe again :)
-                        */
-                       list_del_init(&op->list);
-                       force_unoptimize_kprobe(op);
-               }
+       if (!kprobe_optimized(p))
                return;
-       }
 
-       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (!list_empty(&op->list)) {
-               /* Dequeue from the optimization queue */
-               list_del_init(&op->list);
+               if (optprobe_queued_unopt(op)) {
+                       /* Queued in unoptimizing queue */
+                       if (force) {
+                               /*
+                                * Forcibly unoptimize the kprobe here, and queue it
+                                * in the freeing list for release afterwards.
+                                */
+                               force_unoptimize_kprobe(op);
+                               list_move(&op->list, &freeing_list);
+                       }
+               } else {
+                       /* Dequeue from the optimizing queue */
+                       list_del_init(&op->list);
+                       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+               }
                return;
        }
+
        /* Optimized kprobe case */
-       if (force)
+       if (force) {
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
-       else {
+       } else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
 
@@ -716,8 +744,11 @@ static void reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       BUG_ON(!kprobe_optready(ap));
+       if (!kprobe_optready(ap))
+               return -EINVAL;
+
        optimize_kprobe(ap);
+       return 0;
 }
 
 /* Remove optimized instructions */
@@ -942,11 +973,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)                     kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()            do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+       /*
+        * If the optimized kprobe is NOT supported, the aggr kprobe is
+        * released at the same time that the last aggregated kprobe is
+        * unregistered.
+        * Thus there should be no chance to reuse unused kprobe.
+        */
        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-       BUG_ON(kprobe_unused(ap));
+       return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1182,6 +1218,26 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+struct kprobe kprobe_busy = {
+       .addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+       struct kprobe_ctlblk *kcb;
+
+       preempt_disable();
+       __this_cpu_write(current_kprobe, &kprobe_busy);
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+}
+
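
kprobe_busy_begin()/kprobe_busy_end() let non-kprobe code mark the current CPU as already running a kprobe, so the reentrancy check in the kprobe entry path treats any probe hit in that window as nested and skips its handlers. A hypothetical usage sketch (the real user added by this patch is kprobe_flush_task() below):

	kprobe_busy_begin();	/* preemption off, current_kprobe = &kprobe_busy */
	/*
	 * ... touch kretprobe bookkeeping here; a kprobe that fires on this
	 * CPU is handled as a reentrant hit and its handlers do not run ...
	 */
	kprobe_busy_end();	/* current_kprobe = NULL, preemption back on */
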
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1199,6 +1255,8 @@ void kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       kprobe_busy_begin();
+
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
@@ -1212,6 +1270,8 @@ void kprobe_flush_task(struct task_struct *tk)
                hlist_del(&ri->hlist);
                kfree(ri);
        }
+
+       kprobe_busy_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);
 
@@ -1320,9 +1380,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
                        goto out;
                }
                init_aggr_kprobe(ap, orig_p);
-       } else if (kprobe_unused(ap))
+       } else if (kprobe_unused(ap)) {
                /* This probe is going to die. Rescue it */
-               reuse_unused_kprobe(ap);
+               ret = reuse_unused_kprobe(ap);
+               if (ret)
+                       goto out;
+       }
 
        if (kprobe_gone(ap)) {
                /*
@@ -1488,9 +1551,12 @@ static int check_kprobe_address_safe(struct kprobe *p,
        preempt_disable();
 
        /* Ensure it is not in reserved area nor out of text */
-       if (!kernel_text_address((unsigned long) p->addr) ||
+       if (!(core_kernel_text((unsigned long) p->addr) ||
+           is_module_text_address((unsigned long) p->addr)) ||
+           in_gate_area_no_mm((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr)) {
+           jump_label_text_reserved(p->addr, p->addr) ||
+           find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
        }
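
With the hunk above, register_kprobe() no longer trusts kernel_text_address(), which also covers ftrace trampolines and other generated code; only core kernel and module text are acceptable, and addresses with a __bug_table entry join the excluded set. Read as a positive predicate (illustration only; within_kprobe_blacklist() is local to this file and the helper name is made up):

	static bool probe_addr_ok_sketch(unsigned long addr)
	{
		return (core_kernel_text(addr) || is_module_text_address(addr)) &&
		       !in_gate_area_no_mm(addr) &&		/* not the vsyscall gate */
		       !within_kprobe_blacklist(addr) &&	/* not __kprobes/blacklisted */
		       !jump_label_text_reserved((void *)addr, (void *)addr) &&
		       !find_bug(addr);				/* no __bug_table entry */
	}
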
@@ -1622,12 +1688,14 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
                /* Try to disarm and disable this/parent probe */
                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
                        /*
-                        * If kprobes_all_disarmed is set, orig_p
-                        * should have already been disarmed, so
-                        * skip unneed disarming process.
+                        * Don't be lazy here.  Even if 'kprobes_all_disarmed'
+                        * is false, 'orig_p' might not have been armed yet.
+                        * Note arm_all_kprobes() __tries__ to arm all kprobes
+                        * on the best effort basis.
                         */
-                       if (!kprobes_all_disarmed)
+                       if (!kprobes_all_disarmed && !kprobe_disabled(orig_p))
                                disarm_kprobe(orig_p, true);
+
                        orig_p->flags |= KPROBE_FLAG_DISABLED;
                }
        }
@@ -1925,6 +1993,10 @@ int register_kretprobe(struct kretprobe *rp)
        if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
                return -EINVAL;
 
+       /* If only rp->kp.addr is specified, check reregistering kprobes */
+       if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
+               return -EINVAL;
+
        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (IS_ERR(addr))
@@ -1936,6 +2008,9 @@ int register_kretprobe(struct kretprobe *rp)
                }
        }
 
+       if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+               return -E2BIG;
+
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
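
Together, the additions above make register_kretprobe() fail early: -EINVAL if rp->kp.addr is given and that kprobe is already registered, and -E2BIG if the caller asks for more per-instance data than KRETPROBE_MAX_DATA_SIZE. A hypothetical usage sketch (probe target and sizes invented for illustration):

	static int sketch_ret_handler(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
	{
		/* ri->data points at data_size bytes reserved per instance */
		return 0;
	}

	static struct kretprobe sketch_rp = {
		.kp.symbol_name	= "do_sys_open",	/* hypothetical target */
		.handler	= sketch_ret_handler,
		.data_size	= 32,	/* above KRETPROBE_MAX_DATA_SIZE now gets -E2BIG */
		.maxactive	= 20,
	};

	/* err = register_kretprobe(&sketch_rp); */
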
@@ -2053,6 +2128,9 @@ static void kill_kprobe(struct kprobe *p)
 {
        struct kprobe *kp;
 
+       if (WARN_ON_ONCE(kprobe_gone(p)))
+               return;
+
        p->flags |= KPROBE_FLAG_GONE;
        if (kprobe_aggrprobe(p)) {
                /*
@@ -2070,6 +2148,14 @@ static void kill_kprobe(struct kprobe *p)
         * the original probed function (which will be freed soon) any more.
         */
        arch_remove_kprobe(p);
+
+       /*
+        * The module is going away. We should disarm the kprobe which
+        * is using ftrace, because ftrace framework is still available at
+        * MODULE_STATE_GOING notification.
+        */
+       if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
+               disarm_kprobe_ftrace(p);
 }
 
 /* Disable one kprobe */
@@ -2188,7 +2274,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
+               hlist_for_each_entry_rcu(p, head, hlist) {
+                       if (kprobe_gone(p))
+                               continue;
+
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
@@ -2205,6 +2294,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
                                 */
                                kill_kprobe(p);
                        }
+               }
        }
        mutex_unlock(&kprobe_mutex);
        return NOTIFY_DONE;