git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 17 Jan 2017 10:27:42 +0000 (11:27 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 17 Jan 2017 10:27:42 +0000 (11:27 +0100)
added patches:
jump_labels-api-for-flushing-deferred-jump-label-updates.patch
kvm-x86-add-align16-instruction-flag.patch
kvm-x86-add-asm_safe-wrapper.patch

queue-4.4/jump_labels-api-for-flushing-deferred-jump-label-updates.patch [new file with mode: 0644]
queue-4.4/kvm-x86-add-align16-instruction-flag.patch [new file with mode: 0644]
queue-4.4/kvm-x86-add-asm_safe-wrapper.patch [new file with mode: 0644]
queue-4.4/kvm-x86-emulate-fxsave-and-fxrstor.patch
queue-4.4/kvm-x86-introduce-segmented_write_std.patch
queue-4.4/series

diff --git a/queue-4.4/jump_labels-api-for-flushing-deferred-jump-label-updates.patch b/queue-4.4/jump_labels-api-for-flushing-deferred-jump-label-updates.patch
new file mode 100644 (file)
index 0000000..c9d5d8e
--- /dev/null
@@ -0,0 +1,61 @@
+From b6416e61012429e0277bd15a229222fd17afc1c1 Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Fri, 16 Dec 2016 14:30:35 -0800
+Subject: jump_labels: API for flushing deferred jump label updates
+
+From: David Matlack <dmatlack@google.com>
+
+commit b6416e61012429e0277bd15a229222fd17afc1c1 upstream.
+
+Modules that use static_key_deferred need a way to synchronize with
+any delayed work that is still pending when the module is unloaded.
+Introduce static_key_deferred_flush() which flushes any pending
+jump label updates.
+
+Signed-off-by: David Matlack <dmatlack@google.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/jump_label_ratelimit.h |    5 +++++
+ kernel/jump_label.c                  |    7 +++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -14,6 +14,7 @@ struct static_key_deferred {
+ #ifdef HAVE_JUMP_LABEL
+ extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
++extern void static_key_deferred_flush(struct static_key_deferred *key);
+ extern void
+ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_d
+       STATIC_KEY_CHECK_USE();
+       static_key_slow_dec(&key->key);
+ }
++static inline void static_key_deferred_flush(struct static_key_deferred *key)
++{
++      STATIC_KEY_CHECK_USE();
++}
+ static inline void
+ jump_label_rate_limit(struct static_key_deferred *key,
+               unsigned long rl)
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
++void static_key_deferred_flush(struct static_key_deferred *key)
++{
++      STATIC_KEY_CHECK_USE();
++      flush_delayed_work(&key->work);
++}
++EXPORT_SYMBOL_GPL(static_key_deferred_flush);
++
+ void jump_label_rate_limit(struct static_key_deferred *key,
+               unsigned long rl)
+ {
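
[Usage sketch, illustrative and not part of the patch above: a minimal, hypothetical module showing how the new static_key_deferred_flush() pairs with static_key_slow_dec_deferred() on unload. The names demo_key, demo_init and demo_exit are made up for illustration.]

/* Hypothetical module; assumes only the API added by the patch above. */
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred demo_key;

static int __init demo_init(void)
{
	/* Rate-limit slow decrements: patching is deferred by up to HZ jiffies. */
	jump_label_rate_limit(&demo_key, HZ);
	static_key_slow_inc(&demo_key.key);
	return 0;
}

static void __exit demo_exit(void)
{
	static_key_slow_dec_deferred(&demo_key);
	/* Drain any still-pending deferred update before the module text goes away. */
	static_key_deferred_flush(&demo_key);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The next patch in this series, kvm-x86-flush-pending-lapic-jump-label-updates-on-module-unload.patch, applies the same pattern to KVM's deferred lapic keys when the module is unloaded.
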
diff --git a/queue-4.4/kvm-x86-add-align16-instruction-flag.patch b/queue-4.4/kvm-x86-add-align16-instruction-flag.patch
new file mode 100644 (file)
index 0000000..5df8fe0
--- /dev/null
@@ -0,0 +1,73 @@
+From d3fe959f81024072068e9ed86b39c2acfd7462a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Tue, 8 Nov 2016 20:54:16 +0100
+Subject: KVM: x86: add Align16 instruction flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit d3fe959f81024072068e9ed86b39c2acfd7462a9 upstream.
+
+Needed for FXSAVE and FXRSTOR.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c |   20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -172,6 +172,7 @@
+ #define NearBranch  ((u64)1 << 52)  /* Near branches */
+ #define No16      ((u64)1 << 53)  /* No 16 bit operand */
+ #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
++#define Aligned16   ((u64)1 << 55)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
+ #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
+@@ -620,21 +621,24 @@ static void set_segment_selector(struct
+  * depending on whether they're AVX encoded or not.
+  *
+  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
+- * subject to the same check.
++ * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
++ * 512 bytes of data must be aligned to a 16 byte boundary.
+  */
+-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
++static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
+ {
+       if (likely(size < 16))
+-              return false;
++              return 1;
+       if (ctxt->d & Aligned)
+-              return true;
++              return size;
+       else if (ctxt->d & Unaligned)
+-              return false;
++              return 1;
+       else if (ctxt->d & Avx)
+-              return false;
++              return 1;
++      else if (ctxt->d & Aligned16)
++              return 16;
+       else
+-              return true;
++              return size;
+ }
+ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+@@ -692,7 +696,7 @@ static __always_inline int __linearize(s
+               }
+               break;
+       }
+-      if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
++      if (la & (insn_alignment(ctxt, size) - 1))
+               return emulate_gp(ctxt, 0);
+       return X86EMUL_CONTINUE;
+ bad:
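
[Usage note, illustrative and not part of the patch above: insn_alignment() now returns the required alignment instead of a bool, and the caller tests la & (alignment - 1), the usual power-of-two mask check, so returning 1 makes the check a no-op. A standalone sketch with hypothetical names (required_alignment, la):]

/* Standalone illustration of the mask check; not kernel code. */
#include <stdio.h>

/* Mirrors the shape of insn_alignment(): 1 means no restriction, 16 for
 * Aligned16 instructions such as FXSAVE/FXRSTOR, otherwise the operand size. */
static unsigned required_alignment(unsigned size, int aligned16)
{
	if (size < 16)
		return 1;
	return aligned16 ? 16 : size;
}

int main(void)
{
	unsigned long la = 0x1008;                    /* linear address under test */
	unsigned align = required_alignment(512, 1);  /* 512-byte FXSAVE area -> 16 */

	/* Non-zero low bits mean la misses the required boundary (the emulator
	 * would inject #GP in that case). */
	printf("required alignment %u, misaligned: %s\n",
	       align, (la & (align - 1UL)) ? "yes" : "no");
	return 0;
}
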
diff --git a/queue-4.4/kvm-x86-add-asm_safe-wrapper.patch b/queue-4.4/kvm-x86-add-asm_safe-wrapper.patch
new file mode 100644 (file)
index 0000000..ea170c4
--- /dev/null
@@ -0,0 +1,77 @@
+From aabba3c6abd50b05b1fc2c6ec44244aa6bcda576 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Tue, 8 Nov 2016 20:54:18 +0100
+Subject: KVM: x86: add asm_safe wrapper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit aabba3c6abd50b05b1fc2c6ec44244aa6bcda576 upstream.
+
+Move the existing exception handling for inline assembly into a macro
+and switch its return values to X86EMUL type.
+
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/emulate.c |   34 +++++++++++++++++++++++-----------
+ 1 file changed, 23 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -435,6 +435,26 @@ FOP_END;
+ FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+ FOP_END;
++/*
++ * XXX: inoutclob user must know where the argument is being expanded.
++ *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
++ */
++#define asm_safe(insn, inoutclob...) \
++({ \
++      int _fault = 0; \
++ \
++      asm volatile("1:" insn "\n" \
++                   "2:\n" \
++                   ".pushsection .fixup, \"ax\"\n" \
++                   "3: movl $1, %[_fault]\n" \
++                   "   jmp  2b\n" \
++                   ".popsection\n" \
++                   _ASM_EXTABLE(1b, 3b) \
++                   : [_fault] "+qm"(_fault) inoutclob ); \
++ \
++      _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
++})
++
+ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
+                                   enum x86_intercept intercept,
+                                   enum x86_intercept_stage stage)
+@@ -5086,21 +5106,13 @@ static bool string_insn_completed(struct
+ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+ {
+-      bool fault = false;
++      int rc;
+       ctxt->ops->get_fpu(ctxt);
+-      asm volatile("1: fwait \n\t"
+-                   "2: \n\t"
+-                   ".pushsection .fixup,\"ax\" \n\t"
+-                   "3: \n\t"
+-                   "movb $1, %[fault] \n\t"
+-                   "jmp 2b \n\t"
+-                   ".popsection \n\t"
+-                   _ASM_EXTABLE(1b, 3b)
+-                   : [fault]"+qm"(fault));
++      rc = asm_safe("fwait");
+       ctxt->ops->put_fpu(ctxt);
+-      if (unlikely(fault))
++      if (unlikely(rc != X86EMUL_CONTINUE))
+               return emulate_exception(ctxt, MF_VECTOR, 0, false);
+       return X86EMUL_CONTINUE;
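
[Usage note, illustrative and not part of the patch above: the fwait conversion shown here passes no extra operands, so asm_safe()'s inoutclob part stays empty. The sketch below, with the hypothetical helper em_demo(), shows how a caller with a memory operand would look; the leading ':' turns [fx] into an input of the generated asm statement, which is the expansion detail the XXX comment warns about. It assumes the code sits in arch/x86/kvm/emulate.c after the macro, with struct fxregs_state (asm/fpu/types.h) in scope, roughly mirroring how the later FXSAVE/FXRSTOR patch in this series uses the macro.]

/* Hypothetical caller sketch; em_demo and fx_state are illustrative names. */
static int em_demo(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	memset(&fx_state, 0, sizeof(fx_state));	/* real code would fill this from guest memory */

	/* The ':' before [fx] appends it as an input operand after asm_safe()'s
	 * own [_fault] output operand. */
	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

	/* A faulting FXRSTOR comes back as X86EMUL_UNHANDLEABLE. */
	return rc;
}
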
diff --git a/queue-4.4/kvm-x86-emulate-fxsave-and-fxrstor.patch b/queue-4.4/kvm-x86-emulate-fxsave-and-fxrstor.patch
index a33b295f88d7ce168fdb95f1303f7273daa46631..13018a6a6536bd6ba86a3bbdab086878e3d11a51 100644 (file)
@@ -42,7 +42,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/emulate.c
 +++ b/arch/x86/kvm/emulate.c
-@@ -3858,6 +3858,131 @@ static int em_movsxd(struct x86_emulate_
+@@ -3882,6 +3882,131 @@ static int em_movsxd(struct x86_emulate_
        return X86EMUL_CONTINUE;
  }
  
@@ -174,7 +174,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static bool valid_cr(int nr)
  {
        switch (nr) {
-@@ -4210,7 +4335,9 @@ static const struct gprefix pfx_0f_ae_7
+@@ -4234,7 +4359,9 @@ static const struct gprefix pfx_0f_ae_7
  };
  
  static const struct group_dual group15 = { {
diff --git a/queue-4.4/kvm-x86-introduce-segmented_write_std.patch b/queue-4.4/kvm-x86-introduce-segmented_write_std.patch
index 8f7475349083fcff796969091a72129a196fed89..35d0519e75fc32a81b1e7ebeaa3c5d8c572b0223 100644 (file)
@@ -31,7 +31,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/emulate.c
 +++ b/arch/x86/kvm/emulate.c
-@@ -779,6 +779,20 @@ static int segmented_read_std(struct x86
+@@ -803,6 +803,20 @@ static int segmented_read_std(struct x86
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
  }
  
@@ -52,7 +52,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  /*
   * Prefetch the remaining bytes of the instruction without crossing page
   * boundary if they are not in fetch_cache yet.
-@@ -3674,8 +3688,8 @@ static int emulate_store_desc_ptr(struct
+@@ -3698,8 +3712,8 @@ static int emulate_store_desc_ptr(struct
        }
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
@@ -63,7 +63,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static int em_sgdt(struct x86_emulate_ctxt *ctxt)
-@@ -3921,7 +3935,7 @@ static int em_fxsave(struct x86_emulate_
+@@ -3945,7 +3959,7 @@ static int em_fxsave(struct x86_emulate_
        else
                size = offsetof(struct fxregs_state, xmm_space[0]);
  
@@ -72,7 +72,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  }
  
  static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
-@@ -3963,7 +3977,7 @@ static int em_fxrstor(struct x86_emulate
+@@ -3987,7 +4001,7 @@ static int em_fxrstor(struct x86_emulate
        if (rc != X86EMUL_CONTINUE)
                return rc;
  
diff --git a/queue-4.4/series b/queue-4.4/series
index f7ee34c2d90dd98790721f427b212d31b08fd000..7b66c9fd899b1fa5fc4798dc054d694547fb95ec 100644 (file)
@@ -7,6 +7,9 @@ ocfs2-fix-crash-caused-by-stale-lvb-with-fsdlm-plugin.patch
 mm-hugetlb.c-fix-reservation-race-when-freeing-surplus-pages.patch
 kvm-x86-fix-emulation-of-mov-ss-null-selector.patch
 kvm-eventfd-fix-null-deref-irqbypass-consumer.patch
+jump_labels-api-for-flushing-deferred-jump-label-updates.patch
 kvm-x86-flush-pending-lapic-jump-label-updates-on-module-unload.patch
+kvm-x86-add-align16-instruction-flag.patch
+kvm-x86-add-asm_safe-wrapper.patch
 kvm-x86-emulate-fxsave-and-fxrstor.patch
 kvm-x86-introduce-segmented_write_std.patch