--- /dev/null
+From 1365039d0cb32c0cf96eb9f750f4277c9a90f87d Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+Date: Tue, 4 Nov 2014 08:31:16 +0100
+Subject: KVM: s390: Fix ipte locking
+
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+
+commit 1365039d0cb32c0cf96eb9f750f4277c9a90f87d upstream.
+
+ipte_unlock_siif uses cmpxchg to replace the in-memory data of the ipte
+lock together with ACCESS_ONCE for the initial read.
+
+union ipte_control {
+ unsigned long val;
+ struct {
+ unsigned long k : 1;
+ unsigned long kh : 31;
+ unsigned long kg : 32;
+ };
+};
+[...]
+static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
+{
+ union ipte_control old, new, *ic;
+
+ ic = &vcpu->kvm->arch.sca->ipte_control;
+ do {
+ new = old = ACCESS_ONCE(*ic);
+ new.kh--;
+ if (!new.kh)
+ new.k = 0;
+ } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ if (!new.kh)
+ wake_up(&vcpu->kvm->arch.ipte_wq);
+}
+
+The new value, is loaded twice from memory with gcc 4.7.2 of
+fedora 18, despite the ACCESS_ONCE:
+
+--->
+
+l %r4,0(%r3) <--- load first 32 bit of lock (k and kh) in r4
+alfi %r4,2147483647 <--- add -1 to r4
+llgtr %r4,%r4 <--- zero out the sign bit of r4
+lg %r1,0(%r3) <--- load all 64 bit of lock into new
+lgr %r2,%r1 <--- load the same into old
+risbg %r1,%r4,1,31,32 <--- shift and insert r4 into the bits 1-31 of
+new
+llihf %r4,2147483647
+ngrk %r4,%r1,%r4
+jne aa0 <ipte_unlock+0xf8>
+nihh %r1,32767
+lgr %r4,%r2
+csg %r4,%r1,0(%r3)
+cgr %r2,%r4
+jne a70 <ipte_unlock+0xc8>
+
+If the memory value changes between the first load (l) and the second
+load (lg) we are broken. If that happens VCPU threads will hang
+(unkillable) in handle_ipte_interlock.
+
+Andreas Krebbel analyzed this and tracked it down to a compiler bug in
+that version:
+"while it is not that obvious the C99 standard basically forbids
+duplicating the memory access also in that case. For an argumentation of
+a similar case please see:
+https://gcc.gnu.org/bugzilla/show_bug.cgi?id=22278#c43
+
+For the implementation-defined cases regarding volatile there are some
+GCC-specific clarifications which can be found here:
+https://gcc.gnu.org/onlinedocs/gcc/Volatiles.html#Volatiles
+
+I've tracked down the problem with a reduced testcase. The problem was
+that during a tree level optimization (SRA - scalar replacement of
+aggregates) the volatile marker is lost. And an RTL level optimizer (CSE
+- common subexpression elimination) then propagated the memory read into
+ its second use introducing another access to the memory location. So
+indeed Christian's suspicion that the union access has something to do
+with it is correct (since it triggered the SRA optimization).
+
+This issue has been reported and fixed in the GCC 4.8 development cycle:
+https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145"
+
+This patch replaces the ACCESS_ONCE scheme with a barrier() based scheme
+that should work for all supported compilers.
+
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/gaccess.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -229,10 +229,12 @@ static void ipte_lock_simple(struct kvm_
+ goto out;
+ ic = &vcpu->kvm->arch.sca->ipte_control;
+ do {
+- old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
+ while (old.k) {
+ cond_resched();
+- old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
+ }
+ new = old;
+ new.k = 1;
+@@ -251,7 +253,9 @@ static void ipte_unlock_simple(struct kv
+ goto out;
+ ic = &vcpu->kvm->arch.sca->ipte_control;
+ do {
+- new = old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
++ new = old;
+ new.k = 0;
+ } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ wake_up(&vcpu->kvm->arch.ipte_wq);
+@@ -265,10 +269,12 @@ static void ipte_lock_siif(struct kvm_vc
+
+ ic = &vcpu->kvm->arch.sca->ipte_control;
+ do {
+- old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
+ while (old.kg) {
+ cond_resched();
+- old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
+ }
+ new = old;
+ new.k = 1;
+@@ -282,7 +288,9 @@ static void ipte_unlock_siif(struct kvm_
+
+ ic = &vcpu->kvm->arch.sca->ipte_control;
+ do {
+- new = old = ACCESS_ONCE(*ic);
++ old = *ic;
++ barrier();
++ new = old;
+ new.kh--;
+ if (!new.kh)
+ new.k = 0;
--- /dev/null
+From a36c5393266222129ce6f622e3bc3fb5463f290c Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@linux.vnet.ibm.com>
+Date: Thu, 16 Oct 2014 14:31:53 +0200
+Subject: KVM: s390: Fix size of monitor-class number field
+
+From: Thomas Huth <thuth@linux.vnet.ibm.com>
+
+commit a36c5393266222129ce6f622e3bc3fb5463f290c upstream.
+
+The monitor-class number field is only 16 bits, so we have to use
+a u16 pointer to access it.
+
+Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
+Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/interrupt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -270,7 +270,7 @@ static int __must_check __deliver_prog_i
+ break;
+ case PGM_MONITOR:
+ rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
+- (u64 *)__LC_MON_CLASS_NR);
++ (u16 *)__LC_MON_CLASS_NR);
+ rc |= put_guest_lc(vcpu, pgm_info->mon_code,
+ (u64 *)__LC_MON_CODE);
+ break;
--- /dev/null
+From 2dca485f8740208604543c3960be31a5dd3ea603 Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+Date: Fri, 31 Oct 2014 09:24:20 +0100
+Subject: KVM: s390: flush CPU on load control
+
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+
+commit 2dca485f8740208604543c3960be31a5dd3ea603 upstream.
+
+some control register changes will flush some aspects of the CPU, e.g.
+POP explicitly mentions that for CR9-CR11 "TLBs may be cleared".
+Instead of trying to be clever and only flush on specific CRs, let's
+play it safe and flush on all lctl(g) as future machines might define
+new bits in CRs. Load control intercept should not happen that often.
+
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/priv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -791,7 +791,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+-
++ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ return 0;
+ }
+
+@@ -863,7 +863,7 @@ static int handle_lctlg(struct kvm_vcpu
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+-
++ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ return 0;
+ }
+
--- /dev/null
+From b65d6e17fe2239c9b2051727903955d922083fbf Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 21 Nov 2014 18:13:26 +0100
+Subject: kvm: x86: mask out XSAVES
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit b65d6e17fe2239c9b2051727903955d922083fbf upstream.
+
+This feature is not supported inside KVM guests yet, because we do not emulate
+MSR_IA32_XSS. Mask it out.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -319,6 +319,10 @@ static inline int __do_cpuid_ent(struct
+ F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
+ F(ADX) | F(SMAP);
+
++ /* cpuid 0xD.1.eax */
++ const u32 kvm_supported_word10_x86_features =
++ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1);
++
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+
+@@ -455,13 +459,18 @@ static inline int __do_cpuid_ent(struct
+ entry->eax &= supported;
+ entry->edx &= supported >> 32;
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++ if (!supported)
++ break;
++
+ for (idx = 1, i = 1; idx < 64; ++idx) {
+ u64 mask = ((u64)1 << idx);
+ if (*nent >= maxnent)
+ goto out;
+
+ do_cpuid_1_ent(&entry[i], function, idx);
+- if (entry[i].eax == 0 || !(supported & mask))
++ if (idx == 1)
++ entry[i].eax &= kvm_supported_word10_x86_features;
++ else if (entry[i].eax == 0 || !(supported & mask))
+ continue;
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
x86-export-get_xsave_addr.patch
kvm-x86-support-xsaves-usage-in-the-host.patch
kvm-x86-em_ret_far-overrides-cpl.patch
+kvm-x86-mask-out-xsaves.patch
+kvm-s390-fix-size-of-monitor-class-number-field.patch
+kvm-s390-flush-cpu-on-load-control.patch
+kvm-s390-fix-ipte-locking.patch