git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Jan 2017 10:28:03 +0000 (11:28 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Jan 2017 10:28:03 +0000 (11:28 +0100)
added patches:
jump_labels-api-for-flushing-deferred-jump-label-updates.patch
kvm-x86-add-align16-instruction-flag.patch
kvm-x86-add-asm_safe-wrapper.patch

queue-4.9/jump_labels-api-for-flushing-deferred-jump-label-updates.patch [new file with mode: 0644]
queue-4.9/kvm-x86-add-align16-instruction-flag.patch [new file with mode: 0644]
queue-4.9/kvm-x86-add-asm_safe-wrapper.patch [new file with mode: 0644]
queue-4.9/kvm-x86-emulate-fxsave-and-fxrstor.patch
queue-4.9/kvm-x86-introduce-segmented_write_std.patch
queue-4.9/series

diff --git a/queue-4.9/jump_labels-api-for-flushing-deferred-jump-label-updates.patch b/queue-4.9/jump_labels-api-for-flushing-deferred-jump-label-updates.patch
new file mode 100644 (file)
index 0000000..8142945
--- /dev/null
@@ -0,0 +1,61 @@
+From b6416e61012429e0277bd15a229222fd17afc1c1 Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Fri, 16 Dec 2016 14:30:35 -0800
+Subject: jump_labels: API for flushing deferred jump label updates
+
+From: David Matlack <dmatlack@google.com>
+
+commit b6416e61012429e0277bd15a229222fd17afc1c1 upstream.
+
+Modules that use static_key_deferred need a way to synchronize with
+any delayed work that is still pending when the module is unloaded.
+Introduce static_key_deferred_flush() which flushes any pending
+jump label updates.
+
+Signed-off-by: David Matlack <dmatlack@google.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/jump_label_ratelimit.h |    5 +++++
+ kernel/jump_label.c                  |    7 +++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -14,6 +14,7 @@ struct static_key_deferred {
+ #ifdef HAVE_JUMP_LABEL
+ extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
++extern void static_key_deferred_flush(struct static_key_deferred *key);
+ extern void
+ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_d
+       STATIC_KEY_CHECK_USE();
+       static_key_slow_dec(&key->key);
+ }
++static inline void static_key_deferred_flush(struct static_key_deferred *key)
++{
++      STATIC_KEY_CHECK_USE();
++}
+ static inline void
+ jump_label_rate_limit(struct static_key_deferred *key,
+               unsigned long rl)
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
++void static_key_deferred_flush(struct static_key_deferred *key)
++{
++      STATIC_KEY_CHECK_USE();
++      flush_delayed_work(&key->work);
++}
++EXPORT_SYMBOL_GPL(static_key_deferred_flush);
++
+ void jump_label_rate_limit(struct static_key_deferred *key,
+               unsigned long rl)
+ {
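A minimal usage sketch for the new API, assuming a hypothetical module with its own deferred key (the key and the exit handler below are illustrative; only static_key_slow_dec_deferred() and static_key_deferred_flush() come from the patch above):

#include <linux/module.h>
#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred example_key;	/* hypothetical key */

static void __exit example_exit(void)
{
	/* With rate limiting, the decrement may only queue delayed work
	 * instead of patching the code right away. */
	static_key_slow_dec_deferred(&example_key);
	/* Wait for that delayed work so it cannot run after the module
	 * text has been freed. */
	static_key_deferred_flush(&example_key);
}
module_exit(example_exit);
MODULE_LICENSE("GPL");

The consumer this helper was written for is already in this queue: kvm-x86-flush-pending-lapic-jump-label-updates-on-module-unload.patch flushes KVM's lapic jump labels the same way on module unload.
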
diff --git a/queue-4.9/kvm-x86-add-align16-instruction-flag.patch b/queue-4.9/kvm-x86-add-align16-instruction-flag.patch
new file mode 100644 (file)
index 0000000..6c77582
--- /dev/null
@@ -0,0 +1,73 @@
+From d3fe959f81024072068e9ed86b39c2acfd7462a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Tue, 8 Nov 2016 20:54:16 +0100
+Subject: KVM: x86: add Align16 instruction flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit d3fe959f81024072068e9ed86b39c2acfd7462a9 upstream.
+
+Needed for FXSAVE and FXRSTOR.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c |   20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -171,6 +171,7 @@
+ #define NearBranch  ((u64)1 << 52)  /* Near branches */
+ #define No16      ((u64)1 << 53)  /* No 16 bit operand */
+ #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
++#define Aligned16   ((u64)1 << 55)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
+ #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
+@@ -632,21 +633,24 @@ static void set_segment_selector(struct
+  * depending on whether they're AVX encoded or not.
+  *
+  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
+- * subject to the same check.
++ * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
++ * 512 bytes of data must be aligned to a 16 byte boundary.
+  */
+-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
++static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
+ {
+       if (likely(size < 16))
+-              return false;
++              return 1;
+       if (ctxt->d & Aligned)
+-              return true;
++              return size;
+       else if (ctxt->d & Unaligned)
+-              return false;
++              return 1;
+       else if (ctxt->d & Avx)
+-              return false;
++              return 1;
++      else if (ctxt->d & Aligned16)
++              return 16;
+       else
+-              return true;
++              return size;
+ }
+ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+@@ -704,7 +708,7 @@ static __always_inline int __linearize(s
+               }
+               break;
+       }
+-      if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
++      if (la & (insn_alignment(ctxt, size) - 1))
+               return emulate_gp(ctxt, 0);
+       return X86EMUL_CONTINUE;
+ bad:
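A self-contained sketch of the check the hunk above introduces; the ctxt->d flag tests are reduced to plain booleans and the standalone program itself is illustrative, but the decision order and the mask arithmetic mirror insn_alignment() and its caller in __linearize():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for insn_alignment(): the real function reads
 * Aligned/Unaligned/Avx/Aligned16 out of ctxt->d. */
static unsigned insn_alignment(bool aligned, bool unaligned, bool avx,
			       bool aligned16, unsigned size)
{
	if (size < 16)
		return 1;	/* small accesses are never checked */
	if (aligned)
		return size;	/* legacy SSE: align to operand size */
	else if (unaligned || avx)
		return 1;	/* explicitly exempt from the check */
	else if (aligned16)
		return 16;	/* FXSAVE/FXRSTOR: 512 bytes, 16-byte aligned */
	else
		return size;
}

int main(void)
{
	/* FXSAVE-style operand: 512 bytes with Aligned16 set. */
	unsigned align = insn_alignment(false, false, false, true, 512);
	uint64_t la = 0x1010;	/* 16-byte aligned linear address */

	printf("fault: %d\n", (la & (align - 1)) != 0);	/* 0: no #GP */
	la = 0x1004;		/* only 4-byte aligned */
	printf("fault: %d\n", (la & (align - 1)) != 0);	/* 1: emulate_gp() */
	return 0;
}

Returning an alignment in bytes instead of a bool lets the single "la & (alignment - 1)" test cover both the old all-or-nothing behaviour (1 or size) and the new fixed 16-byte requirement.
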
diff --git a/queue-4.9/kvm-x86-add-asm_safe-wrapper.patch b/queue-4.9/kvm-x86-add-asm_safe-wrapper.patch
new file mode 100644 (file)
index 0000000..c9c075c
--- /dev/null
@@ -0,0 +1,77 @@
+From aabba3c6abd50b05b1fc2c6ec44244aa6bcda576 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Tue, 8 Nov 2016 20:54:18 +0100
+Subject: KVM: x86: add asm_safe wrapper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit aabba3c6abd50b05b1fc2c6ec44244aa6bcda576 upstream.
+
+Move the existing exception handling for inline assembly into a macro
+and switch its return values to X86EMUL type.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c |   34 +++++++++++++++++++++++-----------
+ 1 file changed, 23 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -447,6 +447,26 @@ FOP_END;
+ FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+ FOP_END;
++/*
++ * XXX: inoutclob user must know where the argument is being expanded.
++ *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
++ */
++#define asm_safe(insn, inoutclob...) \
++({ \
++      int _fault = 0; \
++ \
++      asm volatile("1:" insn "\n" \
++                   "2:\n" \
++                   ".pushsection .fixup, \"ax\"\n" \
++                   "3: movl $1, %[_fault]\n" \
++                   "   jmp  2b\n" \
++                   ".popsection\n" \
++                   _ASM_EXTABLE(1b, 3b) \
++                   : [_fault] "+qm"(_fault) inoutclob ); \
++ \
++      _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
++})
++
+ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
+                                   enum x86_intercept intercept,
+                                   enum x86_intercept_stage stage)
+@@ -5098,21 +5118,13 @@ static bool string_insn_completed(struct
+ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+ {
+-      bool fault = false;
++      int rc;
+       ctxt->ops->get_fpu(ctxt);
+-      asm volatile("1: fwait \n\t"
+-                   "2: \n\t"
+-                   ".pushsection .fixup,\"ax\" \n\t"
+-                   "3: \n\t"
+-                   "movb $1, %[fault] \n\t"
+-                   "jmp 2b \n\t"
+-                   ".popsection \n\t"
+-                   _ASM_EXTABLE(1b, 3b)
+-                   : [fault]"+qm"(fault));
++      rc = asm_safe("fwait");
+       ctxt->ops->put_fpu(ctxt);
+-      if (unlikely(fault))
++      if (unlikely(rc != X86EMUL_CONTINUE))
+               return emulate_exception(ctxt, MF_VECTOR, 0, false);
+       return X86EMUL_CONTINUE;
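A brief sketch of the calling pattern the macro enables. The fwait user is the hunk just above; the fxsave-style helper below is illustrative (the function name is hypothetical, and the memory operand is threaded through inoutclob the way the later emulate-fxsave patch in this queue does), showing how extra in/out operands continue the macro's output list after [_fault]:

/* Illustrative caller, not part of this patch. */
static int fxsave_to_buffer_sketch(struct x86_emulate_ctxt *ctxt,
				   struct fxregs_state *fx_state)
{
	int rc;

	ctxt->ops->get_fpu(ctxt);
	/* A fault inside fxsave jumps to the .fixup stub, which sets
	 * _fault and resumes at label 2:, so asm_safe() evaluates to
	 * X86EMUL_UNHANDLEABLE instead of an unhandled host fault. */
	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(*fx_state));
	ctxt->ops->put_fpu(ctxt);

	return rc;
}

A pure input operand would instead open the input section itself, e.g. asm_safe("fxrstor %[fx]", : [fx] "m"(*fx_state)).
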
diff --git a/queue-4.9/kvm-x86-emulate-fxsave-and-fxrstor.patch b/queue-4.9/kvm-x86-emulate-fxsave-and-fxrstor.patch
index c3ea7c53e6465c7999bdfb647bd6ca5238cd338d..aa8e71c331bf4d82ba8cf59f3008755580bded13 100644 (file)
@@ -42,7 +42,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/emulate.c
 +++ b/arch/x86/kvm/emulate.c
-@@ -3870,6 +3870,131 @@ static int em_movsxd(struct x86_emulate_
+@@ -3894,6 +3894,131 @@ static int em_movsxd(struct x86_emulate_
        return X86EMUL_CONTINUE;
  }
  
@@ -174,7 +174,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static bool valid_cr(int nr)
  {
        switch (nr) {
-@@ -4222,7 +4347,9 @@ static const struct gprefix pfx_0f_ae_7
+@@ -4246,7 +4371,9 @@ static const struct gprefix pfx_0f_ae_7
  };
  
  static const struct group_dual group15 = { {
diff --git a/queue-4.9/kvm-x86-introduce-segmented_write_std.patch b/queue-4.9/kvm-x86-introduce-segmented_write_std.patch
index 17f4f4466c4ae144edaa6a545fb52322e092f418..76df9acc83c9bbf558f3598927fdb4c7c4cf3fad 100644 (file)
@@ -31,7 +31,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/emulate.c
 +++ b/arch/x86/kvm/emulate.c
-@@ -791,6 +791,20 @@ static int segmented_read_std(struct x86
+@@ -815,6 +815,20 @@ static int segmented_read_std(struct x86
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
  }
  
@@ -52,7 +52,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  /*
   * Prefetch the remaining bytes of the instruction without crossing page
   * boundary if they are not in fetch_cache yet.
-@@ -3686,8 +3700,8 @@ static int emulate_store_desc_ptr(struct
+@@ -3710,8 +3724,8 @@ static int emulate_store_desc_ptr(struct
        }
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
@@ -63,7 +63,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static int em_sgdt(struct x86_emulate_ctxt *ctxt)
-@@ -3933,7 +3947,7 @@ static int em_fxsave(struct x86_emulate_
+@@ -3957,7 +3971,7 @@ static int em_fxsave(struct x86_emulate_
        else
                size = offsetof(struct fxregs_state, xmm_space[0]);
  
@@ -72,7 +72,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
-@@ -3975,7 +3989,7 @@ static int em_fxrstor(struct x86_emulate
+@@ -3999,7 +4013,7 @@ static int em_fxrstor(struct x86_emulate
        if (rc != X86EMUL_CONTINUE)
                return rc;
  
diff --git a/queue-4.9/series b/queue-4.9/series
index a68230699e28d4bfa2e9d4b77f9cbe4702b8d8de..56daa001b165a6d1b58e15e07ceb01d5e8eae3ad 100644 (file)
@@ -21,7 +21,10 @@ mm-slab.c-fix-slab-freelist-randomization-duplicate-entries.patch
 mm-hugetlb.c-fix-reservation-race-when-freeing-surplus-pages.patch
 kvm-x86-fix-emulation-of-mov-ss-null-selector.patch
 kvm-eventfd-fix-null-deref-irqbypass-consumer.patch
+jump_labels-api-for-flushing-deferred-jump-label-updates.patch
 kvm-x86-flush-pending-lapic-jump-label-updates-on-module-unload.patch
 kvm-x86-fix-null-deref-in-vcpu_scan_ioapic.patch
+kvm-x86-add-align16-instruction-flag.patch
+kvm-x86-add-asm_safe-wrapper.patch
 kvm-x86-emulate-fxsave-and-fxrstor.patch
 kvm-x86-introduce-segmented_write_std.patch