From: Greg Kroah-Hartman
Date: Tue, 13 Jan 2015 22:21:29 +0000 (-0800)
Subject: 3.18-stable patches
X-Git-Tag: v3.10.65~33
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=9d8012c95a3a9f0db3cb048751c18c75162bec27;p=thirdparty%2Fkernel%2Fstable-queue.git

3.18-stable patches

added patches:
	kvm-x86-em_ret_far-overrides-cpl.patch
	kvm-x86-support-xsaves-usage-in-the-host.patch
	x86-export-get_xsave_addr.patch
---

diff --git a/queue-3.18/kvm-x86-em_ret_far-overrides-cpl.patch b/queue-3.18/kvm-x86-em_ret_far-overrides-cpl.patch
new file mode 100644
index 00000000000..ea4a9f76153
--- /dev/null
+++ b/queue-3.18/kvm-x86-em_ret_far-overrides-cpl.patch
@@ -0,0 +1,32 @@
+From ab646f54f4fd1a8b9671b8707f0739fdd28ce2b1 Mon Sep 17 00:00:00 2001
+From: Nadav Amit
+Date: Thu, 11 Dec 2014 12:27:14 +0100
+Subject: KVM: x86: em_ret_far overrides cpl
+
+From: Nadav Amit
+
+commit ab646f54f4fd1a8b9671b8707f0739fdd28ce2b1 upstream.
+
+commit d50eaa18039b ("KVM: x86: Perform limit checks when assigning EIP")
+mistakenly used zero as cpl on em_ret_far. Use the actual one.
+
+Fixes: d50eaa18039b8b848c2285478d0775335ad5e930
+Signed-off-by: Nadav Amit
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kvm/emulate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2128,7 +2128,7 @@ static int em_ret_far(struct x86_emulate
+ 	/* Outer-privilege level return is not implemented */
+ 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ 		return X86EMUL_UNHANDLEABLE;
+-	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
++	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
+ 				       &new_desc);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
diff --git a/queue-3.18/kvm-x86-support-xsaves-usage-in-the-host.patch b/queue-3.18/kvm-x86-support-xsaves-usage-in-the-host.patch
new file mode 100644
index 00000000000..11ab9d70c7b
--- /dev/null
+++ b/queue-3.18/kvm-x86-support-xsaves-usage-in-the-host.patch
@@ -0,0 +1,148 @@
+From df1daba7d1cb8ed7957f873cde5c9e953cbaa483 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini
+Date: Fri, 21 Nov 2014 19:05:07 +0100
+Subject: KVM: x86: support XSAVES usage in the host
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paolo Bonzini
+
+commit df1daba7d1cb8ed7957f873cde5c9e953cbaa483 upstream.
+
+Userspace is expecting non-compacted format for KVM_GET_XSAVE, but
+struct xsave_struct might be using the compacted format. Convert
+in order to preserve userspace ABI.
+
+Likewise, userspace is passing non-compacted format for KVM_SET_XSAVE
+but the kernel will pass it to XRSTORS, and we need to convert back.
+
+Fixes: f31a9f7c71691569359fa7fb8b0acaa44bce0324
+Cc: Fenghua Yu
+Cc: H. Peter Anvin
+Tested-by: Nadav Amit
+Reviewed-by: Radim Krčmář
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kvm/x86.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 83 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3128,15 +3128,89 @@ static int kvm_vcpu_ioctl_x86_set_debugr
+ 	return 0;
+ }
+ 
++#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
++
++static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
++{
++	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
++	u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
++	u64 valid;
++
++	/*
++	 * Copy legacy XSAVE area, to avoid complications with CPUID
++	 * leaves 0 and 1 in the loop below.
++	 */
++	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
++
++	/* Set XSTATE_BV */
++	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
++
++	/*
++	 * Copy each region from the possibly compacted offset to the
++	 * non-compacted offset.
++	 */
++	valid = xstate_bv & ~XSTATE_FPSSE;
++	while (valid) {
++		u64 feature = valid & -valid;
++		int index = fls64(feature) - 1;
++		void *src = get_xsave_addr(xsave, feature);
++
++		if (src) {
++			u32 size, offset, ecx, edx;
++			cpuid_count(XSTATE_CPUID, index,
++				    &size, &offset, &ecx, &edx);
++			memcpy(dest + offset, src, size);
++		}
++
++		valid -= feature;
++	}
++}
++
++static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
++{
++	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
++	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
++	u64 valid;
++
++	/*
++	 * Copy legacy XSAVE area, to avoid complications with CPUID
++	 * leaves 0 and 1 in the loop below.
++	 */
++	memcpy(xsave, src, XSAVE_HDR_OFFSET);
++
++	/* Set XSTATE_BV and possibly XCOMP_BV. */
++	xsave->xsave_hdr.xstate_bv = xstate_bv;
++	if (cpu_has_xsaves)
++		xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
++
++	/*
++	 * Copy each region from the non-compacted offset to the
++	 * possibly compacted offset.
++	 */
++	valid = xstate_bv & ~XSTATE_FPSSE;
++	while (valid) {
++		u64 feature = valid & -valid;
++		int index = fls64(feature) - 1;
++		void *dest = get_xsave_addr(xsave, feature);
++
++		if (dest) {
++			u32 size, offset, ecx, edx;
++			cpuid_count(XSTATE_CPUID, index,
++				    &size, &offset, &ecx, &edx);
++			memcpy(dest, src + offset, size);
++		} else
++			WARN_ON_ONCE(1);
++
++		valid -= feature;
++	}
++}
++
+ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ 					 struct kvm_xsave *guest_xsave)
+ {
+ 	if (cpu_has_xsave) {
+-		memcpy(guest_xsave->region,
+-			&vcpu->arch.guest_fpu.state->xsave,
+-			vcpu->arch.guest_xstate_size);
+-		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
+-			vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
++		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
++		fill_xsave((u8 *) guest_xsave->region, vcpu);
+ 	} else {
+ 		memcpy(guest_xsave->region,
+ 			&vcpu->arch.guest_fpu.state->fxsave,
+@@ -3160,8 +3234,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(
+ 		 */
+ 		if (xstate_bv & ~kvm_supported_xcr0())
+ 			return -EINVAL;
+-		memcpy(&vcpu->arch.guest_fpu.state->xsave,
+-			guest_xsave->region, vcpu->arch.guest_xstate_size);
++		load_xsave(vcpu, (u8 *)guest_xsave->region);
+ 	} else {
+ 		if (xstate_bv & ~XSTATE_FPSSE)
+ 			return -EINVAL;
+@@ -6873,6 +6946,9 @@ int fx_init(struct kvm_vcpu *vcpu)
+ 		return err;
+ 
+ 	fpu_finit(&vcpu->arch.guest_fpu);
++	if (cpu_has_xsaves)
++		vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
++			host_xcr0 | XSTATE_COMPACTION_ENABLED;
+ 
+ 	/*
+ 	 * Ensure guest xcr0 is valid for loading
diff --git a/queue-3.18/series b/queue-3.18/series
index 668496905b7..c59e131a29f 100644
--- a/queue-3.18/series
+++ b/queue-3.18/series
@@ -58,3 +58,6 @@ hid-roccat-potential-out-of-bounds-in-pyra_sysfs_write_settings.patch
 hid-i2c-hid-do-not-free-buffers-in-i2c_hid_stop.patch
 hid-add-battery-quirk-for-usb_device_id_apple_alu_wireless_2011_iso-keyboard.patch
 hid-add-a-new-id-0x501a-for-genius-mousepen-i608x.patch
+x86-export-get_xsave_addr.patch
+kvm-x86-support-xsaves-usage-in-the-host.patch
+kvm-x86-em_ret_far-overrides-cpl.patch
diff --git a/queue-3.18/x86-export-get_xsave_addr.patch b/queue-3.18/x86-export-get_xsave_addr.patch
new file mode 100644
index 00000000000..ced45b80788
--- /dev/null
+++ b/queue-3.18/x86-export-get_xsave_addr.patch
@@ -0,0 +1,29 @@
+From ba7b39203a3a18018173b87e73f27169bd8e5147 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini
+Date: Mon, 24 Nov 2014 10:57:42 +0100
+Subject: x86: export get_xsave_addr
+
+From: Paolo Bonzini
+
+commit ba7b39203a3a18018173b87e73f27169bd8e5147 upstream.
+
+get_xsave_addr is the API to access XSAVE states, and KVM would
+like to use it. Export it.
+
+Cc: x86@kernel.org
+Cc: H. Peter Anvin
+Acked-by: Thomas Gleixner
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/xsave.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -738,3 +738,4 @@ void *get_xsave_addr(struct xsave_struct
+ 
+ 	return (void *)xsave + xstate_comp_offsets[feature];
+ }
++EXPORT_SYMBOL_GPL(get_xsave_addr);