x86/mce: Change to not send SIGBUS error during copy from user
Author:     Tony Luck <tony.luck@intel.com>
AuthorDate: Wed, 18 Aug 2021 00:29:41 +0000 (17:29 -0700)
Commit:     Borislav Petkov <bp@suse.de>
CommitDate: Mon, 20 Sep 2021 08:55:41 +0000 (10:55 +0200)
Sending a SIGBUS for a copy from user is not the correct semantic.
System calls should return -EFAULT (or a short count for write(2)).

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210818002942.1607544-3-tony.luck@intel.com
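[Not part of the patch: a minimal sketch of the syscall-side semantic the commit message describes. When a kernel copy from user space consumes poison, the copy fails like an ordinary fault and the system call returns -EFAULT (or a short count), instead of the task being sent SIGBUS. The function and buffer below are illustrative names, not code from this commit.]

    /*
     * Illustrative sketch only (not from this patch): a typical write(2)-style
     * path. With this change, poison hit during copy_from_user() surfaces here
     * as a failed copy -- the caller returns -EFAULT (or a short count) and no
     * SIGBUS is delivered for the kernel-initiated access.
     */
    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static ssize_t example_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
    {
            char kbuf[128];
            size_t len = min(count, sizeof(kbuf));

            if (copy_from_user(kbuf, ubuf, len))
                    return -EFAULT;         /* fault or poison: report, don't signal */

            /* ... consume kbuf ... */
            return len;
    }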
arch/x86/kernel/cpu/mce/core.c

index 193204aee880178f6d30412b4c068a033473e330..69768fea1dac918c16545d2a676e8be5b63853c8 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1272,7 +1272,7 @@ static void kill_me_maybe(struct callback_head *cb)
                flags |= MF_MUST_KILL;
 
        ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
-       if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
+       if (!ret) {
                set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
                sync_core();
                return;
@@ -1286,15 +1286,21 @@ static void kill_me_maybe(struct callback_head *cb)
        if (ret == -EHWPOISON)
                return;
 
-       if (p->mce_vaddr != (void __user *)-1l) {
-               force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
-       } else {
-               pr_err("Memory error not recovered");
-               kill_me_now(cb);
-       }
+       pr_err("Memory error not recovered");
+       kill_me_now(cb);
+}
+
+static void kill_me_never(struct callback_head *cb)
+{
+       struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
+
+       p->mce_count = 0;
+       pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
+       if (!memory_failure(p->mce_addr >> PAGE_SHIFT, 0))
+               set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
 }
 
-static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
 {
        int count = ++current->mce_count;
 
@@ -1304,11 +1310,7 @@ static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
                current->mce_kflags = m->kflags;
                current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
                current->mce_whole_page = whole_page(m);
-
-               if (kill_current_task)
-                       current->mce_kill_me.func = kill_me_now;
-               else
-                       current->mce_kill_me.func = kill_me_maybe;
+               current->mce_kill_me.func = func;
        }
 
        /* Ten is likely overkill. Don't expect more than two faults before task_work() */
@@ -1459,7 +1461,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
                /* If this triggers there is no way to recover. Die hard. */
                BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-               queue_task_work(&m, msg, kill_current_task);
+               if (kill_current_task)
+                       queue_task_work(&m, msg, kill_me_now);
+               else
+                       queue_task_work(&m, msg, kill_me_maybe);
 
        } else {
                /*
@@ -1477,7 +1482,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                }
 
                if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                       queue_task_work(&m, msg, kill_current_task);
+                       queue_task_work(&m, msg, kill_me_never);
        }
 out:
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
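
[For completeness, a hedged sketch of how the callback chosen above gets to run. The machine check handler cannot safely call memory_failure() or send signals from exception context, so queue_task_work() stashes the function pointer in current->mce_kill_me and defers it with the generic task_work mechanism; the handler then runs in process context before the task returns to user space. The body below is a simplification based on the hunks shown; details beyond them are assumptions.]

    /*
     * Simplified sketch of queue_task_work() (not the full function): the
     * selected handler -- kill_me_now, kill_me_maybe or kill_me_never -- is
     * queued via task_work so it runs in process context on the way back to
     * user space, where calling memory_failure() and signalling are safe.
     */
    #include <linux/task_work.h>

    static void queue_task_work_sketch(struct mce *m, char *msg,
                                       void (*func)(struct callback_head *))
    {
            int count = ++current->mce_count;

            if (count == 1) {
                    /* First machine check for this task: record the details */
                    current->mce_addr = m->addr;
                    current->mce_kflags = m->kflags;
                    current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
                    current->mce_whole_page = whole_page(m);
                    current->mce_kill_me.func = func;

                    /* Run func in process context before returning to user space */
                    task_work_add(current, &current->mce_kill_me, TWA_RESUME);
            } else if (count > 10) {
                    /* Repeated faults on the same access: give up and panic */
                    mce_panic("Too many consecutive machine checks while accessing user data",
                              m, msg);
            }
    }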