5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Mar 2021 11:51:40 +0000 (12:51 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Mar 2021 11:51:40 +0000 (12:51 +0100)
added patches:
entry-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
entry-kvm-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
floppy-reintroduce-o_ndelay-fix.patch
kprobes-fix-to-delay-the-kprobes-jump-optimization.patch
media-i2c-max9286-fix-access-to-unallocated-memory.patch
media-ipu3-cio2-fix-mbus_code-processing-in-cio2_subdev_set_fmt.patch
media-ir_toy-add-another-ir-droid-device.patch
media-marvell-ccic-power-up-the-device-on-mclk-enable.patch
media-smipcie-fix-interrupt-handling-and-ir-timeout.patch
powerpc-prom-fix-ibm-arch-vec-5-platform-support-scan.patch
rcu-nocb-perform-deferred-wake-up-before-last-idle-s-need_resched-check.patch
rcu-nocb-trigger-self-ipi-on-late-deferred-wake-up-before-user-resume.patch
rcu-pull-deferred-rcuog-wake-up-to-rcu_eqs_enter-callers.patch
x86-entry-fix-instrumentation-annotation.patch
x86-fault-fix-amd-erratum-91-errata-fixup-for-user-code.patch
x86-reboot-force-all-cpus-to-exit-vmx-root-if-vmx-is-supported.patch
x86-virt-eat-faults-on-vmxoff-in-reboot-flows.patch

18 files changed:
queue-5.10/entry-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch [new file with mode: 0644]
queue-5.10/entry-kvm-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch [new file with mode: 0644]
queue-5.10/floppy-reintroduce-o_ndelay-fix.patch [new file with mode: 0644]
queue-5.10/kprobes-fix-to-delay-the-kprobes-jump-optimization.patch [new file with mode: 0644]
queue-5.10/media-i2c-max9286-fix-access-to-unallocated-memory.patch [new file with mode: 0644]
queue-5.10/media-ipu3-cio2-fix-mbus_code-processing-in-cio2_subdev_set_fmt.patch [new file with mode: 0644]
queue-5.10/media-ir_toy-add-another-ir-droid-device.patch [new file with mode: 0644]
queue-5.10/media-marvell-ccic-power-up-the-device-on-mclk-enable.patch [new file with mode: 0644]
queue-5.10/media-smipcie-fix-interrupt-handling-and-ir-timeout.patch [new file with mode: 0644]
queue-5.10/powerpc-prom-fix-ibm-arch-vec-5-platform-support-scan.patch [new file with mode: 0644]
queue-5.10/rcu-nocb-perform-deferred-wake-up-before-last-idle-s-need_resched-check.patch [new file with mode: 0644]
queue-5.10/rcu-nocb-trigger-self-ipi-on-late-deferred-wake-up-before-user-resume.patch [new file with mode: 0644]
queue-5.10/rcu-pull-deferred-rcuog-wake-up-to-rcu_eqs_enter-callers.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-entry-fix-instrumentation-annotation.patch [new file with mode: 0644]
queue-5.10/x86-fault-fix-amd-erratum-91-errata-fixup-for-user-code.patch [new file with mode: 0644]
queue-5.10/x86-reboot-force-all-cpus-to-exit-vmx-root-if-vmx-is-supported.patch [new file with mode: 0644]
queue-5.10/x86-virt-eat-faults-on-vmxoff-in-reboot-flows.patch [new file with mode: 0644]

diff --git a/queue-5.10/entry-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch b/queue-5.10/entry-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
new file mode 100644 (file)
index 0000000..6862e19
--- /dev/null
@@ -0,0 +1,71 @@
+From 47b8ff194c1fd73d58dc339b597d466fe48c8958 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 1 Feb 2021 00:05:47 +0100
+Subject: entry: Explicitly flush pending rcuog wakeup before last rescheduling point
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 47b8ff194c1fd73d58dc339b597d466fe48c8958 upstream.
+
+Following the idle loop model, cleanly check for pending rcuog wakeup
+before the last rescheduling point on resuming to user mode. This
+way we can avoid doing it from rcu_user_enter() with the last-resort
+self-IPI hack that enforces rescheduling.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20210131230548.32970-5-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/entry/common.c |    7 +++++++
+ kernel/rcu/tree.c     |   12 +++++++-----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -174,6 +174,10 @@ static unsigned long exit_to_user_mode_l
+                * enabled above.
+                */
+               local_irq_disable_exit_to_user();
++
++              /* Check if any of the above work has queued a deferred wakeup */
++              rcu_nocb_flush_deferred_wakeup();
++
+               ti_work = READ_ONCE(current_thread_info()->flags);
+       }
+@@ -187,6 +191,9 @@ static void exit_to_user_mode_prepare(st
+       lockdep_assert_irqs_disabled();
++      /* Flush pending rcuog wakeup before the last need_resched() check */
++      rcu_nocb_flush_deferred_wakeup();
++
+       if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+               ti_work = exit_to_user_mode_loop(regs, ti_work);
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -699,13 +699,15 @@ noinstr void rcu_user_enter(void)
+       lockdep_assert_irqs_disabled();
+       /*
+-       * We may be past the last rescheduling opportunity in the entry code.
+-       * Trigger a self IPI that will fire and reschedule once we resume to
+-       * user/guest mode.
++       * Other than generic entry implementation, we may be past the last
++       * rescheduling opportunity in the entry code. Trigger a self IPI
++       * that will fire and reschedule once we resume in user/guest mode.
+        */
+       instrumentation_begin();
+-      if (do_nocb_deferred_wakeup(rdp) && need_resched())
+-              irq_work_queue(this_cpu_ptr(&late_wakeup_work));
++      if (!IS_ENABLED(CONFIG_GENERIC_ENTRY) || (current->flags & PF_VCPU)) {
++              if (do_nocb_deferred_wakeup(rdp) && need_resched())
++                      irq_work_queue(this_cpu_ptr(&late_wakeup_work));
++      }
+       instrumentation_end();
+       rcu_eqs_enter(true);
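
For illustration, a minimal userspace model of the ordering this patch enforces in the exit-to-user work loop (all names are simplified stand-ins for the kernel's, and the flag handling is a sketch, not the real TIF machinery): the deferred rcuog wakeup is flushed before the thread flags are re-read, so a reschedule request raised by the wakeup is still seen by the loop's exit check.

#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED 0x1UL

static unsigned long ti_flags;              /* models thread_info->flags */
static bool deferred_rcuog_wakeup = true;   /* parked by an earlier call_rcu() */

/* Waking rcuog on this CPU raises a reschedule request. */
static void flush_deferred_wakeup(void)
{
        if (deferred_rcuog_wakeup) {
                deferred_rcuog_wakeup = false;
                ti_flags |= TIF_NEED_RESCHED;
        }
}

int main(void)
{
        unsigned long ti_work;

        do {
                if (ti_flags & TIF_NEED_RESCHED)
                        ti_flags &= ~TIF_NEED_RESCHED;   /* schedule() ran */

                /* The fix: flush before re-reading the flags, so a wakeup
                 * queued by the work above is seen by the exit check. */
                flush_deferred_wakeup();
                ti_work = ti_flags;                      /* READ_ONCE(...) */
        } while (ti_work & TIF_NEED_RESCHED);

        printf("returning to user mode with no reschedule pending\n");
        return 0;
}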
diff --git a/queue-5.10/entry-kvm-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch b/queue-5.10/entry-kvm-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
new file mode 100644 (file)
index 0000000..2deff61
--- /dev/null
@@ -0,0 +1,147 @@
+From 4ae7dc97f726ea95c58ac58af71cc034ad22d7de Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 1 Feb 2021 00:05:48 +0100
+Subject: entry/kvm: Explicitly flush pending rcuog wakeup before last rescheduling point
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 4ae7dc97f726ea95c58ac58af71cc034ad22d7de upstream.
+
+Following the idle loop model, cleanly check for pending rcuog wakeup
+before the last rescheduling point upon resuming to guest mode. This
+way we can avoid doing it from rcu_user_enter() with the last-resort
+self-IPI hack that enforces rescheduling.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20210131230548.32970-6-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c        |    1 +
+ include/linux/entry-kvm.h |   14 ++++++++++++++
+ kernel/rcu/tree.c         |   44 ++++++++++++++++++++++++++++++++++----------
+ kernel/rcu/tree_plugin.h  |    1 +
+ 4 files changed, 50 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1776,6 +1776,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+ bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+ {
++      xfer_to_guest_mode_prepare();
+       return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
+               xfer_to_guest_mode_work_pending();
+ }
+--- a/include/linux/entry-kvm.h
++++ b/include/linux/entry-kvm.h
+@@ -47,6 +47,20 @@ static inline int arch_xfer_to_guest_mod
+ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+ /**
++ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
++ *                            need to be handled while IRQs are disabled
++ *                            upon entering to guest.
++ *
++ * Has to be invoked with interrupts disabled before the last call
++ * to xfer_to_guest_mode_work_pending().
++ */
++static inline void xfer_to_guest_mode_prepare(void)
++{
++      lockdep_assert_irqs_disabled();
++      rcu_nocb_flush_deferred_wakeup();
++}
++
++/**
+  * __xfer_to_guest_mode_work_pending - Check if work is pending
+  *
+  * Returns: True if work pending, False otherwise.
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -670,9 +670,10 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+ #ifdef CONFIG_NO_HZ_FULL
++#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
+ /*
+  * An empty function that will trigger a reschedule on
+- * IRQ tail once IRQs get re-enabled on userspace resume.
++ * IRQ tail once IRQs get re-enabled on userspace/guest resume.
+  */
+ static void late_wakeup_func(struct irq_work *work)
+ {
+@@ -681,6 +682,37 @@ static void late_wakeup_func(struct irq_
+ static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
+       IRQ_WORK_INIT(late_wakeup_func);
++/*
++ * If either:
++ *
++ * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
++ * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
++ *
++ * In these cases the late RCU wake ups aren't supported in the resched loops and our
++ * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
++ * get re-enabled again.
++ */
++noinstr static void rcu_irq_work_resched(void)
++{
++      struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
++      if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
++              return;
++
++      if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
++              return;
++
++      instrumentation_begin();
++      if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
++              irq_work_queue(this_cpu_ptr(&late_wakeup_work));
++      }
++      instrumentation_end();
++}
++
++#else
++static inline void rcu_irq_work_resched(void) { }
++#endif
++
+ /**
+  * rcu_user_enter - inform RCU that we are resuming userspace.
+  *
+@@ -694,8 +726,6 @@ static DEFINE_PER_CPU(struct irq_work, l
+  */
+ noinstr void rcu_user_enter(void)
+ {
+-      struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+-
+       lockdep_assert_irqs_disabled();
+       /*
+@@ -703,13 +733,7 @@ noinstr void rcu_user_enter(void)
+        * rescheduling opportunity in the entry code. Trigger a self IPI
+        * that will fire and reschedule once we resume in user/guest mode.
+        */
+-      instrumentation_begin();
+-      if (!IS_ENABLED(CONFIG_GENERIC_ENTRY) || (current->flags & PF_VCPU)) {
+-              if (do_nocb_deferred_wakeup(rdp) && need_resched())
+-                      irq_work_queue(this_cpu_ptr(&late_wakeup_work));
+-      }
+-      instrumentation_end();
+-
++      rcu_irq_work_resched();
+       rcu_eqs_enter(true);
+ }
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2197,6 +2197,7 @@ void rcu_nocb_flush_deferred_wakeup(void
+ {
+       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
+ }
++EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
+ void __init rcu_init_nohz(void)
+ {
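
The configuration matrix behind rcu_irq_work_resched() is easier to see in isolation; here is a small standalone sketch (a toy model, not kernel code) enumerating when the self-IPI fallback is still required:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of rcu_irq_work_resched()'s decision: the self-IPI fallback
 * is only needed when neither the generic user-entry path nor the KVM
 * guest-entry path flushed the deferred rcuog wakeup for us. */
static bool needs_self_ipi(bool generic_entry, bool kvm_xfer_work, bool vcpu)
{
        if (generic_entry && !vcpu)
                return false;   /* user resume: generic entry code flushes */
        if (kvm_xfer_work && vcpu)
                return false;   /* guest resume: xfer_to_guest_mode_prepare() flushes */
        return true;            /* no saner hook: fall back to the irq_work self-IPI */
}

int main(void)
{
        for (int ge = 0; ge <= 1; ge++)
                for (int kv = 0; kv <= 1; kv++)
                        for (int vc = 0; vc <= 1; vc++)
                                printf("GENERIC_ENTRY=%d KVM_XFER=%d PF_VCPU=%d -> self-IPI: %s\n",
                                       ge, kv, vc,
                                       needs_self_ipi(ge, kv, vc) ? "yes" : "no");
        return 0;
}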
diff --git a/queue-5.10/floppy-reintroduce-o_ndelay-fix.patch b/queue-5.10/floppy-reintroduce-o_ndelay-fix.patch
new file mode 100644 (file)
index 0000000..e4fb519
--- /dev/null
@@ -0,0 +1,83 @@
+From 8a0c014cd20516ade9654fc13b51345ec58e7be8 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 22 Jan 2021 12:13:20 +0100
+Subject: floppy: reintroduce O_NDELAY fix
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit 8a0c014cd20516ade9654fc13b51345ec58e7be8 upstream.
+
+This issue was originally fixed in 09954bad4 ("floppy: refactor open()
+flags handling").
+
+The fix, however, as a side effect introduced an issue for open(O_ACCMODE),
+which is being used for ioctl-only open. I wrote a fix for that, but
+instead of it being merged, a full revert of 09954bad4 was performed,
+re-introducing the O_NDELAY / O_NONBLOCK issue, and it strikes again.
+
+This is a forward-port of the original fix to current codebase; the
+original submission had the changelog below:
+
+====
+Commit 09954bad4 ("floppy: refactor open() flags handling"), as a
+side-effect, causes open(/dev/fdX, O_ACCMODE) to fail. It turns out that
+this is used by setfdprm userspace for ioctl-only open().
+
+Reintroduce the original behavior wrt !(FMODE_READ|FMODE_WRITE)
+modes, while still keeping the original O_NDELAY bug fixed.
+
+Link: https://lore.kernel.org/r/nycvar.YFH.7.76.2101221209060.5622@cbobk.fhfr.pm
+Cc: stable@vger.kernel.org
+Reported-by: Wim Osterholt <wim@djo.tudelft.nl>
+Tested-by: Wim Osterholt <wim@djo.tudelft.nl>
+Reported-and-tested-by: Kurt Garloff <kurt@garloff.de>
+Fixes: 09954bad4 ("floppy: refactor open() flags handling")
+Fixes: f2791e7ead ("Revert "floppy: refactor open() flags handling"")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Denis Efremov <efremov@linux.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/floppy.c |   30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4120,23 +4120,23 @@ static int floppy_open(struct block_devi
+       if (fdc_state[FDC(drive)].rawcmd == 1)
+               fdc_state[FDC(drive)].rawcmd = 2;
+-      if (!(mode & FMODE_NDELAY)) {
+-              if (mode & (FMODE_READ|FMODE_WRITE)) {
+-                      drive_state[drive].last_checked = 0;
+-                      clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+-                                &drive_state[drive].flags);
+-                      if (bdev_check_media_change(bdev))
+-                              floppy_revalidate(bdev->bd_disk);
+-                      if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+-                              goto out;
+-                      if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+-                              goto out;
+-              }
+-              res = -EROFS;
+-              if ((mode & FMODE_WRITE) &&
+-                  !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++      if (mode & (FMODE_READ|FMODE_WRITE)) {
++              drive_state[drive].last_checked = 0;
++              clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
++              if (bdev_check_media_change(bdev))
++                      floppy_revalidate(bdev->bd_disk);
++              if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
++                      goto out;
++              if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+                       goto out;
+       }
++
++      res = -EROFS;
++
++      if ((mode & FMODE_WRITE) &&
++                      !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++              goto out;
++
+       mutex_unlock(&open_lock);
+       mutex_unlock(&floppy_mutex);
+       return 0;
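
The open(O_ACCMODE) trick the changelog describes is ordinary userspace usage; a minimal sketch of an ioctl-only open in the style of setfdprm (the device path is illustrative):

#include <fcntl.h>
#include <linux/fd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct floppy_struct prm;
        /* O_ACCMODE (== 3) selects neither read nor write: the descriptor
         * is only good for ioctl(), which is all setfdprm needs. */
        int fd = open("/dev/fd0", O_ACCMODE | O_NDELAY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FDGETPRM, &prm) == 0)
                printf("%u sectors, %u tracks\n", prm.sect, prm.track);
        close(fd);
        return 0;
}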
diff --git a/queue-5.10/kprobes-fix-to-delay-the-kprobes-jump-optimization.patch b/queue-5.10/kprobes-fix-to-delay-the-kprobes-jump-optimization.patch
new file mode 100644 (file)
index 0000000..59c62b2
--- /dev/null
@@ -0,0 +1,115 @@
+From c85c9a2c6e368dc94907e63babb18a9788e5c9b6 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Thu, 18 Feb 2021 23:29:23 +0900
+Subject: kprobes: Fix to delay the kprobes jump optimization
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit c85c9a2c6e368dc94907e63babb18a9788e5c9b6 upstream.
+
+Commit 36dadef23fcc ("kprobes: Init kprobes in early_initcall")
+moved the kprobe setup in early_initcall(), which includes kprobe
+jump optimization.
+The kprobes jump optimizer involves synchronize_rcu_tasks(), which
+depends on ksoftirqd and rcu_spawn_tasks_*(). However, since those
+are set up in core_initcall(), the kprobes jump optimizer cannot
+run at early_initcall() time.
+
+To avoid this issue, leave kprobe optimization disabled in
+early_initcall() and enable it in subsys_initcall().
+
+Note that non-optimized kprobes are still available after
+early_initcall(). Only jump optimization is delayed.
+
+Link: https://lkml.kernel.org/r/161365856280.719838.12423085451287256713.stgit@devnote2
+
+Fixes: 36dadef23fcc ("kprobes: Init kprobes in early_initcall")
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: RCU <rcu@vger.kernel.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Daniel Axtens <dja@axtens.net>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "Theodore Y . Ts'o" <tytso@mit.edu>
+Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
+Cc: stable@vger.kernel.org
+Reported-by: Paul E. McKenney <paulmck@kernel.org>
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Uladzislau Rezki <urezki@gmail.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kprobes.c |   31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -871,7 +871,6 @@ out:
+       cpus_read_unlock();
+ }
+-#ifdef CONFIG_SYSCTL
+ static void optimize_all_kprobes(void)
+ {
+       struct hlist_head *head;
+@@ -897,6 +896,7 @@ out:
+       mutex_unlock(&kprobe_mutex);
+ }
++#ifdef CONFIG_SYSCTL
+ static void unoptimize_all_kprobes(void)
+ {
+       struct hlist_head *head;
+@@ -2627,18 +2627,14 @@ static int __init init_kprobes(void)
+               }
+       }
+-#if defined(CONFIG_OPTPROBES)
+-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+-      /* Init kprobe_optinsn_slots */
+-      kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+-#endif
+-      /* By default, kprobes can be optimized */
+-      kprobes_allow_optimization = true;
+-#endif
+-
+       /* By default, kprobes are armed */
+       kprobes_all_disarmed = false;
++#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
++      /* Init kprobe_optinsn_slots for allocation */
++      kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
++#endif
++
+       err = arch_init_kprobes();
+       if (!err)
+               err = register_die_notifier(&kprobe_exceptions_nb);
+@@ -2653,6 +2649,21 @@ static int __init init_kprobes(void)
+ }
+ early_initcall(init_kprobes);
++#if defined(CONFIG_OPTPROBES)
++static int __init init_optprobes(void)
++{
++      /*
++       * Enable kprobe optimization - this kicks the optimizer which
++       * depends on synchronize_rcu_tasks() and ksoftirqd, that is
++       * not spawned in early initcall. So delay the optimization.
++       */
++      optimize_all_kprobes();
++
++      return 0;
++}
++subsys_initcall(init_optprobes);
++#endif
++
+ #ifdef CONFIG_DEBUG_FS
+ static void report_probe(struct seq_file *pi, struct kprobe *p,
+               const char *sym, int offset, char *modname, struct kprobe *pp)
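
As a rough illustration of the initcall-ordering argument, here is a userspace model that uses GCC constructor priorities to stand in for early_initcall(), core_initcall() and subsys_initcall() (a sketch only; the kernel's initcall machinery is different):

#include <stdbool.h>
#include <stdio.h>

static bool dependencies_ready;   /* models ksoftirqd + rcu_tasks kthreads */
static bool kprobes_armed;

/* models early_initcall(init_kprobes): arm kprobes, optimizer stays off */
__attribute__((constructor(101)))
static void model_init_kprobes(void)
{
        kprobes_armed = true;
        printf("early: kprobes armed, optimization deferred\n");
}

/* models core_initcall(): synchronize_rcu_tasks()'s dependencies appear */
__attribute__((constructor(102)))
static void model_core_initcall(void)
{
        dependencies_ready = true;
}

/* models subsys_initcall(init_optprobes): now the optimizer may run */
__attribute__((constructor(103)))
static void model_init_optprobes(void)
{
        printf("subsys: optimize_all_kprobes() %s\n",
               dependencies_ready ? "safe to run" : "would hang");
}

int main(void)
{
        return 0;
}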
diff --git a/queue-5.10/media-i2c-max9286-fix-access-to-unallocated-memory.patch b/queue-5.10/media-i2c-max9286-fix-access-to-unallocated-memory.patch
new file mode 100644 (file)
index 0000000..9b8d208
--- /dev/null
@@ -0,0 +1,37 @@
+From e88ccf09e79cf33cac40316ba69c820d9eebc82b Mon Sep 17 00:00:00 2001
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Date: Mon, 18 Jan 2021 09:14:46 +0100
+Subject: media: i2c: max9286: fix access to unallocated memory
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+commit e88ccf09e79cf33cac40316ba69c820d9eebc82b upstream.
+
+The asd allocated with v4l2_async_notifier_add_fwnode_subdev() must be
+of size max9286_asd, otherwise access to max9286_asd->source will go to
+unallocated memory.
+
+Fixes: 86d37bf31af6 ("media: i2c: max9286: Allocate v4l2_async_subdev dynamically")
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Cc: stable@vger.kernel.org # v5.10+
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+Tested-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/max9286.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -580,7 +580,7 @@ static int max9286_v4l2_notifier_registe
+               asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+                                                           source->fwnode,
+-                                                          sizeof(*asd));
++                                                          sizeof(struct max9286_asd));
+               if (IS_ERR(asd)) {
+                       dev_err(dev, "Failed to add subdev for source %u: %ld",
+                               i, PTR_ERR(asd));
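
The underlying bug class is a container structure allocated at the base structure's size; a minimal standalone sketch (struct names are simplified stand-ins for the v4l2 ones):

#include <stdio.h>
#include <stdlib.h>

struct v4l2_async_subdev_model { int match_type; };   /* base type */

struct max9286_asd_model {
        struct v4l2_async_subdev_model base;          /* must come first */
        void *source;                                 /* driver-private */
};

/* models v4l2_async_notifier_add_fwnode_subdev(): caller picks the size */
static void *add_subdev(size_t asd_struct_size)
{
        return calloc(1, asd_struct_size);
}

int main(void)
{
        /* Buggy: passing sizeof(base) means writing ->source lands past
         * the allocation. Fixed: pass the container's size. */
        struct max9286_asd_model *asd =
                add_subdev(sizeof(struct max9286_asd_model));

        asd->source = &asd;   /* in bounds only with the container size */
        printf("container %zu bytes vs base %zu bytes\n",
               sizeof(*asd), sizeof(asd->base));
        free(asd);
        return 0;
}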
diff --git a/queue-5.10/media-ipu3-cio2-fix-mbus_code-processing-in-cio2_subdev_set_fmt.patch b/queue-5.10/media-ipu3-cio2-fix-mbus_code-processing-in-cio2_subdev_set_fmt.patch
new file mode 100644 (file)
index 0000000..1297010
--- /dev/null
@@ -0,0 +1,35 @@
+From 334de4b45892f7e67074e1b1b2ac36fd3e091118 Mon Sep 17 00:00:00 2001
+From: Pavel Machek <pavel@denx.de>
+Date: Wed, 30 Dec 2020 13:55:50 +0100
+Subject: media: ipu3-cio2: Fix mbus_code processing in cio2_subdev_set_fmt()
+
+From: Pavel Machek <pavel@denx.de>
+
+commit 334de4b45892f7e67074e1b1b2ac36fd3e091118 upstream.
+
+The loop was useless as it would always exit on the first iteration.
+Fix it with the right condition.
+
+Signed-off-by: Pavel Machek (CIP) <pavel@denx.de>
+Fixes: a86cf9b29e8b ("media: ipu3-cio2: Validate mbus format in setting subdev format")
+Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: stable@vger.kernel.org # v4.16 and up
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/pci/intel/ipu3/ipu3-cio2.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1277,7 +1277,7 @@ static int cio2_subdev_set_fmt(struct v4
+       fmt->format.code = formats[0].mbus_code;
+       for (i = 0; i < ARRAY_SIZE(formats); i++) {
+-              if (formats[i].mbus_code == fmt->format.code) {
++              if (formats[i].mbus_code == mbus_code) {
+                       fmt->format.code = mbus_code;
+                       break;
+               }
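
A standalone model of the loop defect (values hypothetical): once fmt->format.code is seeded with formats[0].mbus_code, comparing against it matches on the first iteration regardless of what was requested, whereas comparing against the requested mbus_code actually validates it:

#include <stdio.h>

#define N 4

static const unsigned int formats[N] = { 0x3001, 0x3002, 0x3003, 0x3004 };

static unsigned int set_fmt(unsigned int mbus_code)
{
        unsigned int code = formats[0];   /* default if request unsupported */
        int i;

        for (i = 0; i < N; i++) {
                /* buggy: if (formats[i] == code) is always true at i == 0 */
                if (formats[i] == mbus_code) {
                        code = mbus_code;
                        break;
                }
        }
        return code;
}

int main(void)
{
        printf("request 0x3003 -> 0x%x\n", set_fmt(0x3003));   /* honored */
        printf("request 0x9999 -> 0x%x\n", set_fmt(0x9999));   /* falls back */
        return 0;
}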
diff --git a/queue-5.10/media-ir_toy-add-another-ir-droid-device.patch b/queue-5.10/media-ir_toy-add-another-ir-droid-device.patch
new file mode 100644 (file)
index 0000000..93931ac
--- /dev/null
@@ -0,0 +1,32 @@
+From 4487e0215560392bd11c9de08d60824d72c89cd9 Mon Sep 17 00:00:00 2001
+From: Sean Young <sean@mess.org>
+Date: Sun, 27 Dec 2020 14:45:01 +0100
+Subject: media: ir_toy: add another IR Droid device
+
+From: Sean Young <sean@mess.org>
+
+commit 4487e0215560392bd11c9de08d60824d72c89cd9 upstream.
+
+This device is also supported.
+
+Cc: stable@vger.kernel.org
+Tested-by: Georgi Bakalski <georgi.bakalski@gmail.com>
+Reported-by: Georgi Bakalski <georgi.bakalski@gmail.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/rc/ir_toy.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/rc/ir_toy.c
++++ b/drivers/media/rc/ir_toy.c
+@@ -491,6 +491,7 @@ static void irtoy_disconnect(struct usb_
+ static const struct usb_device_id irtoy_table[] = {
+       { USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xfd08, USB_CLASS_CDC_DATA) },
++      { USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xf58b, USB_CLASS_CDC_DATA) },
+       { }
+ };
diff --git a/queue-5.10/media-marvell-ccic-power-up-the-device-on-mclk-enable.patch b/queue-5.10/media-marvell-ccic-power-up-the-device-on-mclk-enable.patch
new file mode 100644 (file)
index 0000000..33d3a61
--- /dev/null
@@ -0,0 +1,39 @@
+From 655ae29da72a693cf294bba3c3322e662ff75bd3 Mon Sep 17 00:00:00 2001
+From: Lubomir Rintel <lkundrak@v3.sk>
+Date: Wed, 27 Jan 2021 19:01:43 +0100
+Subject: media: marvell-ccic: power up the device on mclk enable
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+commit 655ae29da72a693cf294bba3c3322e662ff75bd3 upstream.
+
+Writing to REG_CLKCTRL with the power off causes a hang. Enable the
+device first.
+
+Cc: stable@vger.kernel.org # 5.10+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/marvell-ccic/mcam-core.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -931,6 +931,7 @@ static int mclk_enable(struct clk_hw *hw
+               mclk_div = 2;
+       }
++      pm_runtime_get_sync(cam->dev);
+       clk_enable(cam->clk[0]);
+       mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+       mcam_ctlr_power_up(cam);
+@@ -944,6 +945,7 @@ static void mclk_disable(struct clk_hw *
+       mcam_ctlr_power_down(cam);
+       clk_disable(cam->clk[0]);
++      pm_runtime_put(cam->dev);
+ }
+ static unsigned long mclk_recalc_rate(struct clk_hw *hw,
diff --git a/queue-5.10/media-smipcie-fix-interrupt-handling-and-ir-timeout.patch b/queue-5.10/media-smipcie-fix-interrupt-handling-and-ir-timeout.patch
new file mode 100644 (file)
index 0000000..30af581
--- /dev/null
@@ -0,0 +1,109 @@
+From 6532923237b427ed30cc7b4486f6f1ccdee3c647 Mon Sep 17 00:00:00 2001
+From: Sean Young <sean@mess.org>
+Date: Fri, 29 Jan 2021 11:54:53 +0100
+Subject: media: smipcie: fix interrupt handling and IR timeout
+
+From: Sean Young <sean@mess.org>
+
+commit 6532923237b427ed30cc7b4486f6f1ccdee3c647 upstream.
+
+After the first IR message, interrupts are no longer received. In addition,
+the code generates a timeout IR message of 10ms but sets the timeout value
+to 100ms, so no timeout was ever generated.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=204317
+
+Fixes: a49a7a4635de ("media: smipcie: add universal ir capability")
+Tested-by: Laz Lev <lazlev@web.de>
+Cc: stable@vger.kernel.org # v5.1+
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/pci/smipcie/smipcie-ir.c |   48 ++++++++++++++++++---------------
+ 1 file changed, 27 insertions(+), 21 deletions(-)
+
+--- a/drivers/media/pci/smipcie/smipcie-ir.c
++++ b/drivers/media/pci/smipcie/smipcie-ir.c
+@@ -60,38 +60,44 @@ static void smi_ir_decode(struct smi_rc
+ {
+       struct smi_dev *dev = ir->dev;
+       struct rc_dev *rc_dev = ir->rc_dev;
+-      u32 dwIRControl, dwIRData;
+-      u8 index, ucIRCount, readLoop;
++      u32 control, data;
++      u8 index, ir_count, read_loop;
+-      dwIRControl = smi_read(IR_Init_Reg);
++      control = smi_read(IR_Init_Reg);
+-      if (dwIRControl & rbIRVld) {
+-              ucIRCount = (u8) smi_read(IR_Data_Cnt);
++      dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
+-              readLoop = ucIRCount/4;
+-              if (ucIRCount % 4)
+-                      readLoop += 1;
+-              for (index = 0; index < readLoop; index++) {
+-                      dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
+-
+-                      ir->irData[index*4 + 0] = (u8)(dwIRData);
+-                      ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+-                      ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+-                      ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
++      if (control & rbIRVld) {
++              ir_count = (u8)smi_read(IR_Data_Cnt);
++
++              dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
++
++              read_loop = ir_count / 4;
++              if (ir_count % 4)
++                      read_loop += 1;
++              for (index = 0; index < read_loop; index++) {
++                      data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++                      dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
++
++                      ir->irData[index * 4 + 0] = (u8)(data);
++                      ir->irData[index * 4 + 1] = (u8)(data >> 8);
++                      ir->irData[index * 4 + 2] = (u8)(data >> 16);
++                      ir->irData[index * 4 + 3] = (u8)(data >> 24);
+               }
+-              smi_raw_process(rc_dev, ir->irData, ucIRCount);
+-              smi_set(IR_Init_Reg, rbIRVld);
++              smi_raw_process(rc_dev, ir->irData, ir_count);
+       }
+-      if (dwIRControl & rbIRhighidle) {
++      if (control & rbIRhighidle) {
+               struct ir_raw_event rawir = {};
++              dev_dbg(&rc_dev->dev, "high idle\n");
++
+               rawir.pulse = 0;
+               rawir.duration = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+               ir_raw_event_store_with_filter(rc_dev, &rawir);
+-              smi_set(IR_Init_Reg, rbIRhighidle);
+       }
++      smi_set(IR_Init_Reg, rbIRVld);
+       ir_raw_event_handle(rc_dev);
+ }
+@@ -150,7 +156,7 @@ int smi_ir_init(struct smi_dev *dev)
+       rc_dev->dev.parent = &dev->pci_dev->dev;
+       rc_dev->map_name = dev->info->rc_map;
+-      rc_dev->timeout = MS_TO_US(100);
++      rc_dev->timeout = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+       rc_dev->rx_resolution = SMI_SAMPLE_PERIOD;
+       ir->rc_dev = rc_dev;
+@@ -173,7 +179,7 @@ void smi_ir_exit(struct smi_dev *dev)
+       struct smi_rc *ir = &dev->ir;
+       struct rc_dev *rc_dev = ir->rc_dev;
+-      smi_ir_stop(ir);
+       rc_unregister_device(rc_dev);
++      smi_ir_stop(ir);
+       ir->rc_dev = NULL;
+ }
diff --git a/queue-5.10/powerpc-prom-fix-ibm-arch-vec-5-platform-support-scan.patch b/queue-5.10/powerpc-prom-fix-ibm-arch-vec-5-platform-support-scan.patch
new file mode 100644 (file)
index 0000000..e7a5f0b
--- /dev/null
@@ -0,0 +1,59 @@
+From ed5b00a05c2ae95b59adc3442f45944ec632e794 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= <clg@kaod.org>
+Date: Fri, 22 Jan 2021 08:50:29 +0100
+Subject: powerpc/prom: Fix "ibm,arch-vec-5-platform-support" scan
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cédric Le Goater <clg@kaod.org>
+
+commit ed5b00a05c2ae95b59adc3442f45944ec632e794 upstream.
+
+The "ibm,arch-vec-5-platform-support" property is a list of pairs of
+bytes representing the options and values supported by the platform
+firmware. At boot time, Linux scans this list and activates the
+available features it recognizes: Radix and XIVE.
+
+A recent change modified the number of entries to loop on, and 8 bytes,
+i.e. 4 pairs of { option, value } entries, are always scanned. This is
+fine on KVM but not on PowerVM, which can advertise fewer. As a
+consequence, on this platform Linux reads extra entries pointing to
+random data, interprets these as available features and tries to
+activate them, leading to a firmware crash in
+ibm,client-architecture-support.
+
+Fix that by using the property length of "ibm,arch-vec-5-platform-support".
+
+Fixes: ab91239942a9 ("powerpc/prom: Remove VLA in prom_check_platform_support()")
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210122075029.797013-1-clg@kaod.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/prom_init.c |   12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -1330,14 +1330,10 @@ static void __init prom_check_platform_s
+               if (prop_len > sizeof(vec))
+                       prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
+                                   prop_len);
+-              prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+-                           &vec, sizeof(vec));
+-              for (i = 0; i < sizeof(vec); i += 2) {
+-                      prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+-                                                                , vec[i]
+-                                                                , vec[i + 1]);
+-                      prom_parse_platform_support(vec[i], vec[i + 1],
+-                                                  &supported);
++              prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
++              for (i = 0; i < prop_len; i += 2) {
++                      prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
++                      prom_parse_platform_support(vec[i], vec[i + 1], &supported);
+               }
+       }
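
A standalone sketch of the scan (property bytes hypothetical): the property is a list of { option, value } byte pairs, so the loop bound must be the advertised property length rather than the size of the local buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char vec[8];                    /* fixed local buffer */
        const unsigned char fw_prop[] = { 0x17, 0x80, 0x18, 0x40 }; /* 2 pairs */
        int prop_len = sizeof(fw_prop);          /* length the platform reports */

        memset(vec, 0xaa, sizeof(vec));          /* stale bytes past the property */
        memcpy(vec, fw_prop, prop_len);

        /* Buggy bound: i < sizeof(vec) also "parses" the 0xaa garbage.
         * Fixed bound: i < prop_len stops at what was advertised. */
        for (int i = 0; i < prop_len; i += 2)
                printf("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
        return 0;
}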
diff --git a/queue-5.10/rcu-nocb-perform-deferred-wake-up-before-last-idle-s-need_resched-check.patch b/queue-5.10/rcu-nocb-perform-deferred-wake-up-before-last-idle-s-need_resched-check.patch
new file mode 100644 (file)
index 0000000..2d208f2
--- /dev/null
@@ -0,0 +1,90 @@
+From 43789ef3f7d61aa7bed0cb2764e588fc990c30ef Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 1 Feb 2021 00:05:45 +0100
+Subject: rcu/nocb: Perform deferred wake up before last idle's need_resched() check
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 43789ef3f7d61aa7bed0cb2764e588fc990c30ef upstream.
+
+Entering RCU idle mode may cause a deferred wake up of an RCU NOCB_GP
+kthread (rcuog) to be serviced.
+
+Usually a local wake up happening while running the idle task is handled
+in one of the need_resched() checks carefully placed within the idle
+loop that can break to the scheduler.
+
+Unfortunately the call to rcu_idle_enter() is already beyond the last
+generic need_resched() check and we may halt the CPU with a resched
+request unhandled, leaving the task hanging.
+
+Fix this by splitting the rcuog wakeup handling out of rcu_idle_enter()
+and placing it before the last generic need_resched() check in the idle
+loop. It is then assumed that no call to call_rcu() will be performed
+after that in the idle loop until the CPU is put in low power mode.
+
+Fixes: 96d3fd0d315a ("rcu: Break call_rcu() deadlock involving scheduler and perf")
+Reported-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20210131230548.32970-3-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rcupdate.h |    2 ++
+ kernel/rcu/tree.c        |    3 ---
+ kernel/rcu/tree_plugin.h |    5 +++++
+ kernel/sched/idle.c      |    1 +
+ 4 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -110,8 +110,10 @@ static inline void rcu_user_exit(void) {
+ #ifdef CONFIG_RCU_NOCB_CPU
+ void rcu_init_nohz(void);
++void rcu_nocb_flush_deferred_wakeup(void);
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static inline void rcu_init_nohz(void) { }
++static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+ #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+ /**
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -663,10 +663,7 @@ static noinstr void rcu_eqs_enter(bool u
+  */
+ void rcu_idle_enter(void)
+ {
+-      struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+-
+       lockdep_assert_irqs_disabled();
+-      do_nocb_deferred_wakeup(rdp);
+       rcu_eqs_enter(false);
+ }
+ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2187,6 +2187,11 @@ static void do_nocb_deferred_wakeup(stru
+               do_nocb_deferred_wakeup_common(rdp);
+ }
++void rcu_nocb_flush_deferred_wakeup(void)
++{
++      do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
++}
++
+ void __init rcu_init_nohz(void)
+ {
+       int cpu;
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -285,6 +285,7 @@ static void do_idle(void)
+               }
+               arch_cpu_idle_enter();
++              rcu_nocb_flush_deferred_wakeup();
+               /*
+                * In poll mode we reenable interrupts and spin. Also if we
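
A toy model of the lost-wakeup window (plain C, no real concurrency): if the rcuog wakeup only happens after the last need_resched() check, the CPU halts with the wakeup pending; flushing before the check lets the idle loop observe the reschedule request:

#include <stdbool.h>
#include <stdio.h>

static bool need_resched_flag;
static bool deferred_rcuog_wakeup = true;

static void flush_deferred_wakeup(void)
{
        if (deferred_rcuog_wakeup) {
                deferred_rcuog_wakeup = false;
                need_resched_flag = true;   /* waking rcuog requests a resched */
        }
}

int main(void)
{
        /* Fixed ordering: the flush sits before the final check, mirroring
         * where the patch places rcu_nocb_flush_deferred_wakeup(). */
        flush_deferred_wakeup();

        if (need_resched_flag)
                printf("break out of idle and run rcuog\n");
        else
                printf("halt with rcuog wakeup pending (the old ordering)\n");
        return 0;
}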
diff --git a/queue-5.10/rcu-nocb-trigger-self-ipi-on-late-deferred-wake-up-before-user-resume.patch b/queue-5.10/rcu-nocb-trigger-self-ipi-on-late-deferred-wake-up-before-user-resume.patch
new file mode 100644 (file)
index 0000000..bcb71f9
--- /dev/null
@@ -0,0 +1,174 @@
+From f8bb5cae9616224a39cbb399de382d36ac41df10 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 1 Feb 2021 00:05:46 +0100
+Subject: rcu/nocb: Trigger self-IPI on late deferred wake up before user resume
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit f8bb5cae9616224a39cbb399de382d36ac41df10 upstream.
+
+Entering RCU idle mode may cause a deferred wake up of an RCU NOCB_GP
+kthread (rcuog) to be serviced.
+
+Unfortunately the call to rcu_user_enter() is already past the last
+rescheduling opportunity before we resume to userspace or to guest mode.
+We may escape there with the woken task ignored.
+
+The ultimate resort to fix every callsite is to trigger a self-IPI
+(nohz_full depends on the arch to implement arch_irq_work_raise()) that
+will trigger a reschedule on IRQ tail or guest exit.
+
+Eventually every site that wants a saner treatment will need to carefully
+place a call to rcu_nocb_flush_deferred_wakeup() before the last explicit
+need_resched() check upon resume.
+
+Fixes: 96d3fd0d315a ("rcu: Break call_rcu() deadlock involving scheduler and perf")
+Reported-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20210131230548.32970-4-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c        |   21 ++++++++++++++++++++-
+ kernel/rcu/tree.h        |    2 +-
+ kernel/rcu/tree_plugin.h |   25 ++++++++++++++++---------
+ 3 files changed, 37 insertions(+), 11 deletions(-)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -669,6 +669,18 @@ void rcu_idle_enter(void)
+ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+ #ifdef CONFIG_NO_HZ_FULL
++
++/*
++ * An empty function that will trigger a reschedule on
++ * IRQ tail once IRQs get re-enabled on userspace resume.
++ */
++static void late_wakeup_func(struct irq_work *work)
++{
++}
++
++static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
++      IRQ_WORK_INIT(late_wakeup_func);
++
+ /**
+  * rcu_user_enter - inform RCU that we are resuming userspace.
+  *
+@@ -686,12 +698,19 @@ noinstr void rcu_user_enter(void)
+       lockdep_assert_irqs_disabled();
++      /*
++       * We may be past the last rescheduling opportunity in the entry code.
++       * Trigger a self IPI that will fire and reschedule once we resume to
++       * user/guest mode.
++       */
+       instrumentation_begin();
+-      do_nocb_deferred_wakeup(rdp);
++      if (do_nocb_deferred_wakeup(rdp) && need_resched())
++              irq_work_queue(this_cpu_ptr(&late_wakeup_work));
+       instrumentation_end();
+       rcu_eqs_enter(true);
+ }
++
+ #endif /* CONFIG_NO_HZ_FULL */
+ /**
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -431,7 +431,7 @@ static bool rcu_nocb_try_bypass(struct r
+ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+                                unsigned long flags);
+ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
+ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
+ static void rcu_spawn_cpu_nocb_kthread(int cpu);
+ static void __init rcu_spawn_nocb_kthreads(void);
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1631,8 +1631,8 @@ bool rcu_is_nocb_cpu(int cpu)
+  * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
+  * and this function releases it.
+  */
+-static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+-                         unsigned long flags)
++static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
++                       unsigned long flags)
+       __releases(rdp->nocb_lock)
+ {
+       bool needwake = false;
+@@ -1643,7 +1643,7 @@ static void wake_nocb_gp(struct rcu_data
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("AlreadyAwake"));
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+-              return;
++              return false;
+       }
+       del_timer(&rdp->nocb_timer);
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+@@ -1656,6 +1656,8 @@ static void wake_nocb_gp(struct rcu_data
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+       if (needwake)
+               wake_up_process(rdp_gp->nocb_gp_kthread);
++
++      return needwake;
+ }
+ /*
+@@ -2152,20 +2154,23 @@ static int rcu_nocb_need_deferred_wakeup
+ }
+ /* Do a deferred wakeup of rcu_nocb_kthread(). */
+-static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+ {
+       unsigned long flags;
+       int ndw;
++      int ret;
+       rcu_nocb_lock_irqsave(rdp, flags);
+       if (!rcu_nocb_need_deferred_wakeup(rdp)) {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+-              return;
++              return false;
+       }
+       ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+-      wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
++      ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
++
++      return ret;
+ }
+ /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+@@ -2181,10 +2186,11 @@ static void do_nocb_deferred_wakeup_time
+  * This means we do an inexact common-case check.  Note that if
+  * we miss, ->nocb_timer will eventually clean things up.
+  */
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
+       if (rcu_nocb_need_deferred_wakeup(rdp))
+-              do_nocb_deferred_wakeup_common(rdp);
++              return do_nocb_deferred_wakeup_common(rdp);
++      return false;
+ }
+ void rcu_nocb_flush_deferred_wakeup(void)
+@@ -2523,8 +2529,9 @@ static int rcu_nocb_need_deferred_wakeup
+       return false;
+ }
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
++      return false;
+ }
+ static void rcu_spawn_cpu_nocb_kthread(int cpu)
diff --git a/queue-5.10/rcu-pull-deferred-rcuog-wake-up-to-rcu_eqs_enter-callers.patch b/queue-5.10/rcu-pull-deferred-rcuog-wake-up-to-rcu_eqs_enter-callers.patch
new file mode 100644 (file)
index 0000000..fbc8b7a
--- /dev/null
@@ -0,0 +1,59 @@
+From 54b7429efffc99e845ba9381bee3244f012a06c2 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 1 Feb 2021 00:05:44 +0100
+Subject: rcu: Pull deferred rcuog wake up to rcu_eqs_enter() callers
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 54b7429efffc99e845ba9381bee3244f012a06c2 upstream.
+
+Deferred wakeup of rcuog kthreads upon RCU idle mode entry is going to
+be handled differently depending on whether it is initiated by idle, user
+or guest entry. Prepare by pulling that control up to the rcu_eqs_enter()
+callers.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20210131230548.32970-2-frederic@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -636,7 +636,6 @@ static noinstr void rcu_eqs_enter(bool u
+       trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+       rdp = this_cpu_ptr(&rcu_data);
+-      do_nocb_deferred_wakeup(rdp);
+       rcu_prepare_for_idle();
+       rcu_preempt_deferred_qs(current);
+@@ -664,7 +663,10 @@ static noinstr void rcu_eqs_enter(bool u
+  */
+ void rcu_idle_enter(void)
+ {
++      struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
+       lockdep_assert_irqs_disabled();
++      do_nocb_deferred_wakeup(rdp);
+       rcu_eqs_enter(false);
+ }
+ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+@@ -683,7 +685,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+  */
+ noinstr void rcu_user_enter(void)
+ {
++      struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
+       lockdep_assert_irqs_disabled();
++
++      instrumentation_begin();
++      do_nocb_deferred_wakeup(rdp);
++      instrumentation_end();
++
+       rcu_eqs_enter(true);
+ }
+ #endif /* CONFIG_NO_HZ_FULL */
diff --git a/queue-5.10/series b/queue-5.10/series
index 9a69598a22e51f898740ada09bdf27d2766f59c7..5773ba2f9c910f9ab85e9432aadc016085a5acdd 100644 (file)
@@ -558,3 +558,20 @@ arm64-dts-agilex-fix-phy-interface-bit-shift-for-gmac1-and-gmac2.patch
 staging-mt7621-dma-mtk-hsdma.c-hsdma-mt7621.c.patch
 staging-gdm724x-fix-dma-from-stack.patch
 staging-rtl8188eu-add-edimax-ew-7811un-v2-to-device-table.patch
+floppy-reintroduce-o_ndelay-fix.patch
+media-i2c-max9286-fix-access-to-unallocated-memory.patch
+media-ir_toy-add-another-ir-droid-device.patch
+media-ipu3-cio2-fix-mbus_code-processing-in-cio2_subdev_set_fmt.patch
+media-marvell-ccic-power-up-the-device-on-mclk-enable.patch
+media-smipcie-fix-interrupt-handling-and-ir-timeout.patch
+x86-virt-eat-faults-on-vmxoff-in-reboot-flows.patch
+x86-reboot-force-all-cpus-to-exit-vmx-root-if-vmx-is-supported.patch
+x86-fault-fix-amd-erratum-91-errata-fixup-for-user-code.patch
+x86-entry-fix-instrumentation-annotation.patch
+powerpc-prom-fix-ibm-arch-vec-5-platform-support-scan.patch
+rcu-pull-deferred-rcuog-wake-up-to-rcu_eqs_enter-callers.patch
+rcu-nocb-perform-deferred-wake-up-before-last-idle-s-need_resched-check.patch
+rcu-nocb-trigger-self-ipi-on-late-deferred-wake-up-before-user-resume.patch
+entry-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
+entry-kvm-explicitly-flush-pending-rcuog-wakeup-before-last-rescheduling-point.patch
+kprobes-fix-to-delay-the-kprobes-jump-optimization.patch
diff --git a/queue-5.10/x86-entry-fix-instrumentation-annotation.patch b/queue-5.10/x86-entry-fix-instrumentation-annotation.patch
new file mode 100644 (file)
index 0000000..28f9a10
--- /dev/null
@@ -0,0 +1,33 @@
+From 15f720aabe71a5662c4198b22532d95bbeec80ef Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 10 Feb 2021 00:40:42 +0100
+Subject: x86/entry: Fix instrumentation annotation
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 15f720aabe71a5662c4198b22532d95bbeec80ef upstream.
+
+Embracing a callout into instrumentation_begin() / instrumentation_begin()
+does not really make sense. Make the latter instrumentation_end().
+
+Fixes: 2f6474e4636b ("x86/entry: Switch XEN/PV hypercall entry to IDTENTRY")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210210002512.106502464@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/common.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -304,7 +304,7 @@ __visible noinstr void xen_pv_evtchn_do_
+       instrumentation_begin();
+       run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+-      instrumentation_begin();
++      instrumentation_end();
+       set_irq_regs(old_regs);
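
The pairing rule is easy to model with a depth counter; a minimal sketch (instrumentation_begin()/instrumentation_end() modeled as plain functions, balance checked with an assert rather than objtool):

#include <assert.h>
#include <stdio.h>

static int instr_depth;

static void instrumentation_begin(void) { instr_depth++; }
static void instrumentation_end(void)   { instr_depth--; assert(instr_depth >= 0); }

int main(void)
{
        instrumentation_begin();
        /* ... run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs) ... */
        instrumentation_end();       /* the bug: this was a second begin() */

        assert(instr_depth == 0);    /* balance check, objtool's job in-kernel */
        printf("instrumentation section balanced\n");
        return 0;
}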
diff --git a/queue-5.10/x86-fault-fix-amd-erratum-91-errata-fixup-for-user-code.patch b/queue-5.10/x86-fault-fix-amd-erratum-91-errata-fixup-for-user-code.patch
new file mode 100644 (file)
index 0000000..29a0fa3
--- /dev/null
@@ -0,0 +1,95 @@
+From 35f1c89b0cce247bf0213df243ed902989b1dcda Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 9 Feb 2021 18:33:33 -0800
+Subject: x86/fault: Fix AMD erratum #91 errata fixup for user code
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 35f1c89b0cce247bf0213df243ed902989b1dcda upstream.
+
+The recent rework of probe_kernel_address() and its conversion to
+get_kernel_nofault() inadvertently broke is_prefetch(). Before this
+change, probe_kernel_address() was used as a sloppy "read user or
+kernel memory" helper, but it doesn't do that any more. The new
+get_kernel_nofault() reads *kernel* memory only, which completely broke
+is_prefetch() for user access.
+
+Adjust the code to use the correct accessor based on the access mode.
+As a bonus, the open-coded address bounds check is no longer necessary,
+since the accessor helpers (get_user() / get_kernel_nofault()) do the
+right thing all by themselves.
+
+ [ bp: Massage commit message. ]
+
+Fixes: eab0c6089b68 ("maccess: unify the probe kernel arch hooks")
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/b91f7f92f3367d2d3a88eec3b09c6aab1b2dc8ef.1612924255.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/fault.c |   27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -53,7 +53,7 @@ kmmio_fault(struct pt_regs *regs, unsign
+  * 32-bit mode:
+  *
+  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+- *   Check that here and ignore it.
++ *   Check that here and ignore it.  This is AMD erratum #91.
+  *
+  * 64-bit mode:
+  *
+@@ -82,11 +82,7 @@ check_prefetch_opcode(struct pt_regs *re
+ #ifdef CONFIG_X86_64
+       case 0x40:
+               /*
+-               * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+-               * Need to figure out under what instruction mode the
+-               * instruction was issued. Could check the LDT for lm,
+-               * but for now it's good enough to assume that long
+-               * mode only uses well known segments or kernel.
++               * In 64-bit mode 0x40..0x4F are valid REX prefixes
+                */
+               return (!user_mode(regs) || user_64bit_mode(regs));
+ #endif
+@@ -126,20 +122,31 @@ is_prefetch(struct pt_regs *regs, unsign
+       instr = (void *)convert_ip_to_linear(current, regs);
+       max_instr = instr + 15;
+-      if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
+-              return 0;
++      /*
++       * This code has historically always bailed out if IP points to a
++       * not-present page (e.g. due to a race).  No one has ever
++       * complained about this.
++       */
++      pagefault_disable();
+       while (instr < max_instr) {
+               unsigned char opcode;
+-              if (get_kernel_nofault(opcode, instr))
+-                      break;
++              if (user_mode(regs)) {
++                      if (get_user(opcode, instr))
++                              break;
++              } else {
++                      if (get_kernel_nofault(opcode, instr))
++                              break;
++              }
+               instr++;
+               if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
+                       break;
+       }
++
++      pagefault_enable();
+       return prefetch;
+ }
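
For illustration, a simplified userspace model of the opcode scan is_prefetch() performs (the prefix set is abbreviated; the real code fetches each byte with get_user() or get_kernel_nofault() depending on the faulting mode):

#include <stdbool.h>
#include <stdio.h>

/* Skip common x86 instruction prefixes (segment overrides, REX,
 * operand/address size, lock/rep); an abbreviated set for illustration. */
static bool is_prefix(unsigned char op)
{
        return (op & 0xf0) == 0x40 ||                    /* REX (64-bit) */
               op == 0x26 || op == 0x2e || op == 0x36 ||
               op == 0x3e || op == 0x64 || op == 0x65 ||
               op == 0x66 || op == 0x67 ||
               op == 0xf0 || op == 0xf2 || op == 0xf3;
}

/* 0x0F 0x0D (PREFETCH/PREFETCHW) and 0x0F 0x18 (PREFETCHh) are the
 * instructions AMD erratum #91 can blame for a spurious page fault. */
static bool looks_like_prefetch(const unsigned char *instr, int len)
{
        int i = 0;

        while (i < len && is_prefix(instr[i]))
                i++;
        return i + 1 < len && instr[i] == 0x0f &&
               (instr[i + 1] == 0x0d || instr[i + 1] == 0x18);
}

int main(void)
{
        const unsigned char prefetchw[] = { 0x0f, 0x0d, 0x0f };  /* prefetchw */
        const unsigned char mov[]       = { 0x48, 0x8b, 0x07 };  /* mov (%rdi),%rax */

        printf("prefetchw -> %d, mov -> %d\n",
               looks_like_prefetch(prefetchw, 3),
               looks_like_prefetch(mov, 3));
        return 0;
}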
diff --git a/queue-5.10/x86-reboot-force-all-cpus-to-exit-vmx-root-if-vmx-is-supported.patch b/queue-5.10/x86-reboot-force-all-cpus-to-exit-vmx-root-if-vmx-is-supported.patch
new file mode 100644 (file)
index 0000000..66ca479
--- /dev/null
@@ -0,0 +1,72 @@
+From ed72736183c45a413a8d6974dd04be90f514cb6b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 30 Dec 2020 16:26:55 -0800
+Subject: x86/reboot: Force all cpus to exit VMX root if VMX is supported
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ed72736183c45a413a8d6974dd04be90f514cb6b upstream.
+
+Force all CPUs to do VMXOFF (via NMI shootdown) during an emergency
+reboot if VMX is _supported_, as VMX being off on the current CPU does
+not prevent other CPUs from being in VMX root (post-VMXON).  This fixes
+a bug where a crash/panic reboot could leave other CPUs in VMX root and
+prevent them from being woken via INIT-SIPI-SIPI in the new kernel.
+
+Fixes: d176720d34c7 ("x86: disable VMX on all CPUs on reboot")
+Cc: stable@vger.kernel.org
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: David P. Reed <dpreed@deepplum.com>
+[sean: reworked changelog and further tweaked comment]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20201231002702.2223707-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/reboot.c |   30 ++++++++++--------------------
+ 1 file changed, 10 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -538,31 +538,21 @@ static void emergency_vmx_disable_all(vo
+       local_irq_disable();
+       /*
+-       * We need to disable VMX on all CPUs before rebooting, otherwise
+-       * we risk hanging up the machine, because the CPU ignores INIT
+-       * signals when VMX is enabled.
++       * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
++       * the machine, because the CPU blocks INIT when it's in VMX root.
+        *
+-       * We can't take any locks and we may be on an inconsistent
+-       * state, so we use NMIs as IPIs to tell the other CPUs to disable
+-       * VMX and halt.
++       * We can't take any locks and we may be on an inconsistent state, so
++       * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
+        *
+-       * For safety, we will avoid running the nmi_shootdown_cpus()
+-       * stuff unnecessarily, but we don't have a way to check
+-       * if other CPUs have VMX enabled. So we will call it only if the
+-       * CPU we are running on has VMX enabled.
+-       *
+-       * We will miss cases where VMX is not enabled on all CPUs. This
+-       * shouldn't do much harm because KVM always enable VMX on all
+-       * CPUs anyway. But we can miss it on the small window where KVM
+-       * is still enabling VMX.
++       * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
++       * doesn't prevent a different CPU from being in VMX root operation.
+        */
+-      if (cpu_has_vmx() && cpu_vmx_enabled()) {
+-              /* Disable VMX on this CPU. */
+-              cpu_vmxoff();
++      if (cpu_has_vmx()) {
++              /* Safely force _this_ CPU out of VMX root operation. */
++              __cpu_emergency_vmxoff();
+-              /* Halt and disable VMX on the other CPUs */
++              /* Halt and exit VMX root operation on the other CPUs. */
+               nmi_shootdown_cpus(vmxoff_nmi);
+-
+       }
+ }
diff --git a/queue-5.10/x86-virt-eat-faults-on-vmxoff-in-reboot-flows.patch b/queue-5.10/x86-virt-eat-faults-on-vmxoff-in-reboot-flows.patch
new file mode 100644 (file)
index 0000000..3a02046
--- /dev/null
@@ -0,0 +1,64 @@
+From aec511ad153556640fb1de38bfe00c69464f997f Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 30 Dec 2020 16:26:54 -0800
+Subject: x86/virt: Eat faults on VMXOFF in reboot flows
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit aec511ad153556640fb1de38bfe00c69464f997f upstream.
+
+Silently ignore all faults on VMXOFF in the reboot flows as such faults
+are all but guaranteed to be due to the CPU not being in VMX root.
+Because (a) VMXOFF may be executed in NMI context, e.g. after VMXOFF but
+before CR4.VMXE is cleared, (b) there's no way to query the CPU's VMX
+state without faulting, and (c) the whole point is to get out of VMX
+root, eating faults is the simplest way to achieve the desired behavior.
+
+Technically, VMXOFF can fault (or fail) for other reasons, but all other
+fault and failure scenarios are mode related, i.e. the kernel would have
+to magically end up in RM, V86, compat mode, at CPL>0, or running with
+the SMI Transfer Monitor active.  The kernel is beyond hosed if any of
+those scenarios are encountered; trying to do something fancy in the
+error path to handle them cleanly is pointless.
+
+Fixes: 1e9931146c74 ("x86: asm/virtext.h: add cpu_vmxoff() inline function")
+Reported-by: David P. Reed <dpreed@deepplum.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20201231002702.2223707-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/virtext.h |   17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
+ }
+-/** Disable VMX on the current CPU
++/**
++ * cpu_vmxoff() - Disable VMX on the current CPU
+  *
+- * vmxoff causes a undefined-opcode exception if vmxon was not run
+- * on the CPU previously. Only call this function if you know VMX
+- * is enabled.
++ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
++ *
++ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
++ * atomically track post-VMXON state, e.g. this may be called in NMI context.
++ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
++ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
++ * magically in RM, VM86, compat mode, or at CPL>0.
+  */
+ static inline void cpu_vmxoff(void)
+ {
+-      asm volatile ("vmxoff");
++      asm_volatile_goto("1: vmxoff\n\t"
++                        _ASM_EXTABLE(1b, %l[fault]) :::: fault);
++fault:
+       cr4_clear_bits(X86_CR4_VMXE);
+ }